# hf_public_repos/transformers/src/transformers/models/pix2struct/modeling_pix2struct.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Pix2Struct modeling file"""

import math
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import (
    DUMMY_INPUTS,
    DUMMY_MASK,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_torch_fx_proxy,
    logging,
    replace_return_docstrings,
)
from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "Pix2StructConfig"

PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/pix2struct-textcaps-base",
    "google/pix2struct-textcaps-large",
    "google/pix2struct-base",
    "google/pix2struct-large",
    "google/pix2struct-ai2d-base",
    "google/pix2struct-ai2d-large",
    "google/pix2struct-widget-captioning-base",
    "google/pix2struct-widget-captioning-large",
    "google/pix2struct-screen2words-base",
    "google/pix2struct-screen2words-large",
    "google/pix2struct-docvqa-base",
    "google/pix2struct-docvqa-large",
    "google/pix2struct-ocrvqa-base",
    "google/pix2struct-ocrvqa-large",
    "google/pix2struct-chartqa-base",
    "google/pix2struct-inforgraphics-vqa-base",
    "google/pix2struct-inforgraphics-vqa-large",
    # See all Pix2StructVision models at https://huggingface.co/models?filter=pix2struct
]


# Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct
class Pix2StructLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
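# Illustrative sketch, not part of the upstream file: the RMS normalization above written out for a
# single made-up vector, to show that only the root-mean-square is divided out (no mean subtraction,
# no bias), before the learned `weight` (initialized to ones) is applied:
#
#     x = torch.tensor([3.0, 4.0])
#     variance = x.pow(2).mean(-1, keepdim=True)            # (9 + 16) / 2 = 12.5
#     normalized = x * torch.rsqrt(variance + 1e-6)         # ~= [0.8485, 1.1314]  (x / sqrt(12.5))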
try:
    from apex.normalization import FusedRMSNorm

    Pix2StructLayerNorm = FusedRMSNorm  # noqa

    logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm")
except ImportError:
    # using the normal Pix2StructLayerNorm
    pass
except Exception:
    logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
    pass

ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm)


class Pix2StructVisionEmbeddings(nn.Module):
    r"""
    Construct the embeddings from patch. In `Pix2Struct` the input is different from classic Vision-transformer
    models. Here the input is a sequence of `seq_len` flattened patches that also combines padding patches (tokens).
    Each patch is represented by a vector of `hidden_size` values.
    """

    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)

        self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size)
        self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size)

        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
        # the row and column indices are stored in the first and second position of the flattened_patches
        # flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2
        row_indices = flattened_patches[:, :, 0].long()
        col_indices = flattened_patches[:, :, 1].long()

        flattened_patches = flattened_patches[:, :, 2:]

        embeddings = self.patch_projection(flattened_patches)
        row_embeddings = self.row_embedder(row_indices)
        col_embeddings = self.column_embedder(col_indices)

        # sum all embeddings together
        embeddings = embeddings + row_embeddings + col_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings
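# Illustrative sketch, not part of the upstream file: the input layout `Pix2StructVisionEmbeddings.forward`
# expects. Each flattened patch is prefixed by its row and column index, followed by
# `patch_embed_hidden_size` pixel values; the index values and shapes below are made up and use a
# default `Pix2StructVisionConfig`:
#
#     config = Pix2StructVisionConfig()
#     embeddings = Pix2StructVisionEmbeddings(config)
#     # batch of 1 with 3 patches: [row_index, column_index, flattened patch pixel values...]
#     patches = torch.rand(1, 3, config.patch_embed_hidden_size + 2)
#     patches[:, :, 0] = torch.tensor([1.0, 1.0, 2.0])  # row indices
#     patches[:, :, 1] = torch.tensor([1.0, 2.0, 1.0])  # column indices
#     embeddings(patches).shape  # torch.Size([1, 3, config.hidden_size])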
class Pix2StructVisionAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_attention_heads
        self.dropout = config.attention_dropout
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
    ):
        """
        Self-attention block
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
        # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = hidden_states.shape[:2]

        def to_projection_shape(states):
            """projection"""
            return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

        # get query states
        # (batch_size, n_heads, seq_length, dim_per_head)
        query_states = to_projection_shape(self.query(hidden_states))

        # get key/value states
        key_states = to_projection_shape(self.key(hidden_states))
        value_states = to_projection_shape(self.value(hidden_states))

        # compute scores
        # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
        scores = torch.matmul(query_states, key_states.transpose(3, 2))

        if position_bias is None:
            position_bias = torch.zeros(
                (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype
            )
            if self.gradient_checkpointing and self.training:
                position_bias.requires_grad = True

            if attention_mask is None:
                attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype)

            if attention_mask.dim() == 2:
                position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device)
            else:
                # (batch_size, n_heads, seq_length, key_length)
                position_bias = position_bias + attention_mask.to(position_bias.device)
            position_bias = 1 - position_bias

        position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min)
        scores += position_bias_masked
        scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min))

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores)

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = torch.matmul(attn_weights, value_states)

        # (batch_size, seq_length, dim)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)

        attn_output = self.output(attn_output)

        outputs = (attn_output,) + (position_bias,)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
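# Note on the mask convention in `Pix2StructVisionAttention.forward` above: `attention_mask` uses 1 for
# patches to attend to and 0 for padding. When the bias is first built, the mask is folded into
# `position_bias` via `1 - position_bias`, so kept positions add 0 to the scores while padded positions
# are filled with the dtype minimum. Illustrative sketch, values made up:
#
#     mask = torch.tensor([[1.0, 1.0, 0.0]])   # third patch is padding
#     bias = 1 - mask[:, None, None, :]        # tensor([[[[0., 0., 1.]]]])
#     bias.masked_fill(bias == 1, torch.finfo(torch.float32).min)  # padding position gets ~-3.4e38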
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
class Pix2StructVisionMlp(nn.Module):
    def __init__(self, config: Pix2StructVisionConfig):
        super().__init__()
        self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
        # See https://github.com/huggingface/transformers/issues/20287
        # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class Pix2StructVisionLayer(nn.Module):
    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Pix2StructVisionAttention(config)
        self.mlp = Pix2StructVisionMlp(config)
        self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        residual = hidden_states

        # in Pix2StructVision, layernorm is applied before self-attention
        hidden_states = self.pre_attention_layer_norm(hidden_states)

        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + residual

        # in Pix2StructVision, layernorm is also applied after self-attention
        layer_output = self.pre_mlp_layer_norm(hidden_states)
        layer_output = self.mlp(layer_output) + hidden_states  # second residual connection

        outputs = (layer_output,) + outputs

        return outputs


class Pix2StructVisionEncoder(nn.Module):
    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class Pix2StructPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Pix2StructConfig

    @property
    def dummy_inputs(self):
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, Pix2StructLayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, Pix2StructTextDenseGatedActDense):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff

            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, Pix2StructTextAttention):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            key_value_proj_dim = (
                self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size
            )
            n_heads = (
                self.config.text_config.num_heads
                if isinstance(self.config, Pix2StructConfig)
                else self.config.num_heads
            )

            module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5))
            module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
            module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
            module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
        elif isinstance(module, nn.Embedding):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Pix2StructTextModel):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
            # `trunc_normal_cpu` not implemented in `half` issues
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, Pix2StructLayerNorm):
            if module.weight is not None:
                module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct
    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        if decoder_start_token_id is None:
            raise ValueError(
                "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. "
                "See Pix2Struct docs for more information."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id

        if pad_token_id is None:
            raise ValueError("self.model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

        return shifted_input_ids


PIX2STRUCT_VISION_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

PIX2STRUCT_VISION_INPUTS_DOCSTRING = r"""
    Args:
        flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
            Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
            [`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original
            paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details.

        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" @add_start_docstrings( "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.", PIX2STRUCT_VISION_START_DOCSTRING, ) class Pix2StructVisionModel(Pix2StructPreTrainedModel): config_class = Pix2StructVisionConfig main_input_name = "flattened_patches" supports_gradient_checkpointing = True _no_split_modules = ["Pix2StructVisionLayer"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.config = config self.embeddings = Pix2StructVisionEmbeddings(config) self.encoder = Pix2StructVisionEncoder(config) self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_projection def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Example: ```python >>> import requests >>> from PIL import Image >>> from transformers import AutoProcessor, Pix2StructVisionModel >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 2048, 768] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if flattened_patches is None: raise ValueError("You have to specify flattened_patches") if attention_mask is None: # check where `flattened_patches` is not 0 attention_mask = (flattened_patches.sum(dim=-1) != 0).float() # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(flattened_patches) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) if not return_dict: head_outputs = (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size class Pix2StructTextDenseGatedActDense(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class Pix2StructTextLayerFF(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.DenseReluDense = Pix2StructTextDenseGatedActDense(config) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class Pix2StructTextAttention(nn.Module): def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False): super().__init__() self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.hidden_size = config.hidden_size self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=False, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def to_projection_shape(states): """projection""" return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states # (batch_size, n_heads, seq_length, dim_per_head) query_states = to_projection_shape(self.query(hidden_states)) # get key/value states key_states = project( hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) # (batch_size, seq_length, dim) attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) attn_output = self.output(attn_output) present_key_value_state = (key_states, value_states) if use_cache else None outputs = 
(attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class Pix2StructTextBlock(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.self_attention = Pix2StructTextLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config) self.mlp = Pix2StructTextLayerFF(config) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.self_attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.encoder_decoder_attention( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.mlp(hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs PIX2STRUCT_START_DOCSTRING = r""" The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder decoder transformer pre-trained in a image-to-text setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pix2StructText Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ PIX2STRUCT_INPUTS_DOCSTRING = r""" Args: flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`): Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` = `num_channels` * `patch_size` * `patch_size` The process of flattening the pixel patches is done by `Pix2StructProcessor`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss for the decoder. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The standalone text decoder of Pix2Struct", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructTextModel(Pix2StructPreTrainedModel): config_class = Pix2StructTextConfig _no_split_modules = ["Pix2StructTextBlock"] _tied_weights_keys = ["lm_head.weight"] supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size) self.layer = nn.ModuleList( [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]: r""" Returns: Example: ```python >>> from transformers import 
AutoProcessor, Pix2StructTextModel >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ``` """ use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.layer) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = 
self.dropout(hidden_states) logits = self.lm_head(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean") loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1)) if not return_dict: return tuple( v for v in [ loss, logits, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel): config_class = Pix2StructConfig main_input_name = "flattened_patches" _tied_weights_keys = ["decoder.lm_head.weight"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.encoder = Pix2StructVisionModel(config.vision_config) self.decoder = Pix2StructTextModel(config.text_config) self.is_vqa = config.is_vqa # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.decoder.set_output_embeddings(new_embeddings) def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: model_embeds = self.decoder.resize_token_embeddings(new_num_tokens) # update vocab size self.config.text_config.vocab_size = new_num_tokens return model_embeds def get_decoder(self): return self.decoder def get_encoder(self): return self.encoder @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: Inference: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base") >>> url = 
"https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> # autoregressive generation >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A stop sign is on a street corner. >>> # conditional generation >>> text = "A picture of" >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False) >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A picture of a stop sign with a red stop sign ``` Training: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "A stop sign is on the street corner." >>> inputs = processor(images=image, return_tensors="pt") >>> labels = processor(text=text, return_tensors="pt").input_ids >>> # forward pass >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> print(f"{loss.item():.5f}") 5.94282 ```""" use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( flattened_patches=flattened_patches, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) decoder_attention_mask = ( decoder_attention_mask if decoder_attention_mask is not None else decoder_input_ids.ne(self.config.pad_token_id).float() ) # Always attend to the first token decoder_attention_mask[:, 0] = 1 # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): if decoder_attention_mask is None: decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "flattened_patches": flattened_patches, "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, }
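# --------------------------------------------------------------------------
# Illustration only (editor's addition, not part of the library): a minimal,
# self-contained sketch of the cache-trimming rule implemented in
# `prepare_inputs_for_generation` above. During incremental decoding only the
# tokens that are not already covered by `past_key_values` are fed to the
# decoder. The tensors and lengths below are dummies chosen for the demo.
if __name__ == "__main__":
    import torch

    input_ids = torch.tensor([[0, 42, 17, 99]])  # 4 tokens decoded so far
    past_length = 3  # the cache already covers the first 3 positions

    if input_ids.shape[1] > past_length:
        remove_prefix_length = past_length
    else:
        # default to the old behavior: keep only the final ID
        remove_prefix_length = input_ids.shape[1] - 1

    # only the newest token is passed to the decoder on this step
    print(input_ids[:, remove_prefix_length:])  # tensor([[99]])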
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Pix2Struct.""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch logger = logging.get_logger(__name__) DEFAULT_FONT_PATH = "ybelkada/fonts" # adapted from: https://discuss.pytorch.org/t/tf-image-extract-patches-in-pytorch/171409/2 def torch_extract_patches(image_tensor, patch_height, patch_width): """ Utility function to extract patches from a given image tensor. Returns a tensor of shape (1, `rows`, `columns`, `num_channels` x `patch_height` x `patch_width`) Args: image_tensor (torch.Tensor): The image tensor to extract patches from. patch_height (int): The height of the patches to extract. patch_width (int): The width of the patches to extract. """ requires_backends(torch_extract_patches, ["torch"]) image_tensor = image_tensor.unsqueeze(0) patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)) patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1) patches = patches.permute(0, 4, 2, 3, 1).reshape( image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, ) return patches.unsqueeze(0) # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L106 def render_text( text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None, ) -> Image.Image: """ Render text. This script is entirely adapted from the original script that can be found here: https://github.com/google-research/pix2struct/blob/main/pix2struct/preprocessing/preprocessing_utils.py Args: text (`str`): Text to render. text_size (`int`, *optional*, defaults to 36): Size of the text. text_color (`str`, *optional*, defaults to `"black"`): Color of the text. background_color (`str`, *optional*, defaults to `"white"`): Color of the background. left_padding (`int`, *optional*, defaults to 5): Padding on the left.
right_padding (`int`, *optional*, defaults to 5): Padding on the right. top_padding (`int`, *optional*, defaults to 5): Padding on the top. bottom_padding (`int`, *optional*, defaults to 5): Padding on the bottom. font_bytes (`bytes`, *optional*): Bytes of the font to use. If `None`, the default font will be used. font_path (`str`, *optional*): Path to the font to use. If `None`, the default font will be used. """ requires_backends(render_text, "vision") # Add new lines so that each line is no more than 80 characters. wrapper = textwrap.TextWrapper(width=80) lines = wrapper.wrap(text=text) wrapped_text = "\n".join(lines) if font_bytes is not None and font_path is None: font = io.BytesIO(font_bytes) elif font_path is not None: font = font_path else: font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF") font = ImageFont.truetype(font, encoding="UTF-8", size=text_size) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color)) _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font) # Create the actual image with a bit of padding around the text. image_width = text_width + left_padding + right_padding image_height = text_height + top_padding + bottom_padding image = Image.new("RGB", (image_width, image_height), background_color) draw = ImageDraw.Draw(image) draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font) return image # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L87 def render_header( image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ): """ Renders the input text as a header on the input image. Args: image (`np.ndarray`): The image to render the header on. header (`str`): The header text. input_data_format (`Union[ChannelDimension, str]`, *optional*): The channel dimension format of the input image. Can be either `ChannelDimension.FIRST` or `ChannelDimension.LAST`. Returns: `np.ndarray`: The image with the header rendered. """ requires_backends(render_header, "vision") # Convert to PIL image if necessary image = to_pil_image(image, input_data_format=input_data_format) header_image = render_text(header, **kwargs) new_width = max(header_image.width, image.width) new_height = int(image.height * (new_width / image.width)) new_header_height = int(header_image.height * (new_width / header_image.width)) new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white") new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0)) new_image.paste(image.resize((new_width, new_height)), (0, new_header_height)) # Convert back to the original framework if necessary new_image = to_numpy_array(new_image) if infer_channel_dimension_format(new_image) == ChannelDimension.LAST: new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST) return new_image class Pix2StructImageProcessor(BaseImageProcessor): r""" Constructs a Pix2Struct image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard deviation.
patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`): The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16. max_patches (`int`, *optional*, defaults to 2048): The maximum number of patches to extract from the image as per the [Pix2Struct paper](https://arxiv.org/pdf/2210.03347.pdf). is_vqa (`bool`, *optional*, defaults to `False`): Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is rendered onto the input images. """ model_input_names = ["flattened_patches"] def __init__( self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs, ) -> None: super().__init__(**kwargs) self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} self.do_normalize = do_normalize self.do_convert_rgb = do_convert_rgb self.max_patches = max_patches self.is_vqa = is_vqa def extract_flattened_patches( self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Extract flattened patches from an image. Args: image (`np.ndarray`): Image to extract flattened patches from. max_patches (`int`): Maximum number of patches to extract. patch_size (`dict`): Dictionary containing the patch height and width. Returns: result (`np.ndarray`): A sequence of `max_patches` flattened patches. """ requires_backends(self.extract_flattened_patches, "torch") # convert to torch image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) image = torch.from_numpy(image) patch_height, patch_width = patch_size["height"], patch_size["width"] image_height, image_width = get_image_size(image, ChannelDimension.FIRST) # maximize scale s.t. scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1) num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1) resized_height = max(num_feasible_rows * patch_height, 1) resized_width = max(num_feasible_cols * patch_width, 1) image = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] patches = torch_extract_patches(image, patch_height, patch_width) patches_shape = patches.shape rows = patches_shape[1] columns = patches_shape[2] depth = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] patches = patches.reshape([rows * columns, depth]) # [rows * columns, 1] row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1]) col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] row_ids = row_ids.to(torch.float32) col_ids = col_ids.to(torch.float32) # [rows * columns, 2 + patch_height * patch_width * image_channels] result = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float() result = to_numpy_array(result) return result def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. The image std is to mimic the tensorflow implementation of the `per_image_standardization`: https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if image.dtype == np.uint8: image = image.astype(np.float32) # take mean across the whole `image` mean = np.mean(image) std = np.std(image) adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape))) return normalize( image, mean=mean, std=adjusted_stddev, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> ImageInput: """ Preprocess an image or batch of images. The processor first computes the maximum possible number of aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the images are standardized following the tensorflow implementation of `per_image_standardization` (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization). Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images. header_text (`Union[List[str], str]`, *optional*): Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. max_patches (`int`, *optional*, defaults to `self.max_patches`): Maximum number of patches to extract. patch_size (`dict`, *optional*, defaults to `self.patch_size`): Dictionary containing the patch height and width. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb patch_size = patch_size if patch_size is not None else self.patch_size max_patches = max_patches if max_patches is not None else self.max_patches is_vqa = self.is_vqa if kwargs.get("data_format", None) is not None: raise ValueError("data_format is not an accepted input as the outputs are always flattened patches.") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models.") font_bytes = kwargs.pop("font_bytes", None) font_path = kwargs.pop("font_path", None) if isinstance(header_text, str): header_text = [header_text] * len(images) images = [ render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path) for i, image in enumerate(images) ] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] # convert to torch tensor and permute images = [ self.extract_flattened_patches( image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format ) for image in images ] # create attention mask in numpy attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images] encoded_outputs = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors ) return encoded_outputs
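# --------------------------------------------------------------------------
# Illustration only (editor's addition, not part of the library): a numeric
# sketch of the aspect-ratio preserving resize used in
# `extract_flattened_patches` above. The image is scaled so that at most
# `max_patches` patches of `patch_size` fit, then snapped to whole patches.
# The image size below is made up for the demo.
if __name__ == "__main__":
    image_height, image_width = 480, 640
    patch_height = patch_width = 16
    max_patches = 2048

    scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
    num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
    num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)

    # 39 rows x 52 cols = 2028 patches (<= 2048); the image is resized to 624 x 832 pixels
    print(num_feasible_rows, num_feasible_cols)
    print(num_feasible_rows * patch_height, num_feasible_cols * patch_width)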
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/pix2struct/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_pix2struct"] = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pix2struct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig, ) from .processing_pix2struct import Pix2StructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pix2struct import Pix2StructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pix2struct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, Pix2StructVisionModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
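# --------------------------------------------------------------------------
# Illustration only (editor's addition, not part of the library): a minimal
# sketch of the general lazy-module idea that `_LazyModule` implements in a
# more complete form. The class below is a stand-in, not the actual
# transformers implementation; it only shows how attribute access can defer
# the import of heavy submodules until they are first needed.
if __name__ == "__main__":
    import importlib
    import types

    class TinyLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map each public attribute name to the submodule that defines it
            self._attr_to_module = {
                attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            # resolved lazily: the submodule is only imported on first access
            submodule = self._attr_to_module[attr]
            module = importlib.import_module(f".{submodule}", self.__name__)
            return getattr(module, attr)

    # e.g. a lazy "transformers.models.pix2struct" module built this way would
    # only import `modeling_pix2struct` the first time
    # `Pix2StructForConditionalGeneration` is accessed.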
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import torch from flax.traverse_util import flatten_dict from t5x import checkpoints from transformers import ( AutoTokenizer, Pix2StructConfig, Pix2StructForConditionalGeneration, Pix2StructImageProcessor, Pix2StructProcessor, Pix2StructTextConfig, Pix2StructVisionConfig, ) def get_flax_param(t5x_checkpoint_path): flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) flax_params = flatten_dict(flax_params) return flax_params def rename_and_convert_flax_params(flax_dict): converted_dict = {} CONVERSION_MAPPING = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } DECODER_CONVERSION_MAPPING = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key new_key = ".".join(key[1:]) # rename the key for old, new in CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) new_key = new_key.replace("encoder", "encoder.encoder") elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) converted_dict[new_key] = flax_dict[key] converted_torch_dict = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T) else: converted_torch_dict[key] = torch.from_numpy(converted_dict[key]) return converted_torch_dict def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False ): flax_params = get_flax_param(t5x_checkpoint_path) if not use_large: encoder_config = Pix2StructVisionConfig() decoder_config = Pix2StructTextConfig() else: encoder_config = 
Pix2StructVisionConfig( hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 ) decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18) config = Pix2StructConfig( vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa ) model = Pix2StructForConditionalGeneration(config) torch_params = rename_and_convert_flax_params(flax_params) model.load_state_dict(torch_params) tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer") image_processor = Pix2StructImageProcessor() processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok) if use_large: processor.image_processor.max_patches = 4096 processor.image_processor.is_vqa = True # mkdir if needed os.makedirs(pytorch_dump_folder_path, exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) print("Model saved in {}".format(pytorch_dump_folder_path)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--use_large", action="store_true", help="Use large model.") parser.add_argument("--is_vqa", action="store_true", help="Whether the model is a VQA model.") args = parser.parse_args() convert_pix2struct_original_pytorch_checkpoint_to_hf( args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa )
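# --------------------------------------------------------------------------
# Illustration only (editor's addition, not part of the library): a typical
# invocation of this conversion script. The paths below are placeholders.
#
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large  # only for the `-large` checkpoints
#
# The same conversion can also be driven from Python, e.g.:
#
#   convert_pix2struct_original_pytorch_checkpoint_to_hf(
#       "/path/to/t5x_checkpoint", "./pix2struct-converted", use_large=False, is_vqa=False
#   )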
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/pix2struct/configuration_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pix2Struct model configuration""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class Pix2StructTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50244): Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`Pix2StructTextModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Dimensionality of the key, query, value projections in each attention head. d_ff (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string). decoder_start_token_id (`int`, *optional*, defaults to 0): The id of the `decoder_start_token_id` token. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models).
pad_token_id (`int`, *optional*, defaults to 0): The id of the `padding` token. eos_token_id (`int`, *optional*, defaults to 1): The id of the `end-of-sequence` token. Example: ```python >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructTextConfig() >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pix2struct_text_model" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.use_cache = use_cache self.eos_token_id = eos_token_id self.decoder_start_token_id = decoder_start_token_id # for backwards compatibility self.dense_act_fn = dense_act_fn super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Pix2StructVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer.
patch_embed_hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the input patch_embedding layer in the Transformer encoder. d_ff (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. d_kv (`int`, *optional*, defaults to 64): Dimensionality of the key, query, value projections per attention head. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. dropout_rate (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). seq_len (`int`, *optional*, defaults to 4096): Maximum sequence length (here number of patches) supported by the model. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance (in tokens) to use for each attention layer.
Example: ```python >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructVisionConfig() >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pix2struct_vision_model" def __init__( self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.patch_embed_hidden_size = patch_embed_hidden_size self.d_ff = d_ff self.dropout_rate = dropout_rate self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.dense_act_fn = dense_act_fn self.seq_len = seq_len self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.d_kv = d_kv @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Pix2StructConfig(PretrainedConfig): r""" [`Pix2StructConfig`] is the configuration class to store the configuration of a [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Pix2StructTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`]. initializer_factor (`float`, *optional*, defaults to 1.0): Factor to multiply the initialization range with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_vqa (`bool`, *optional*, defaults to `False`): Whether the model has been fine-tuned for VQA or not. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructConfig() >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration >>> config_text = Pix2StructTextConfig() >>> config_vision = Pix2StructVisionConfig() >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "pix2struct" def __init__( self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.") self.text_config = Pix2StructTextConfig(**text_config) self.vision_config = Pix2StructVisionConfig(**vision_config) self.decoder_start_token_id = self.text_config.decoder_start_token_id self.pad_token_id = self.text_config.pad_token_id self.eos_token_id = self.text_config.eos_token_id self.initializer_factor = initializer_factor self.initializer_range = initializer_range self.text_config.initializer_range = self.initializer_range self.vision_config.initializer_range = self.initializer_range self.is_vqa = is_vqa @classmethod def from_text_vision_configs( cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs ): r""" Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct vision model configuration. Returns: [`Pix2StructConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
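# --------------------------------------------------------------------------
# Illustration only (editor's addition, not part of the library): a short
# sketch of how the composite config propagates shared fields, as implemented
# in `Pix2StructConfig.__init__` above. The initializer_range value is chosen
# arbitrarily for the demo.
if __name__ == "__main__":
    config = Pix2StructConfig(initializer_range=0.05)

    # the special token ids are mirrored from the text sub-config ...
    assert config.pad_token_id == config.text_config.pad_token_id
    assert config.eos_token_id == config.text_config.eos_token_id
    assert config.decoder_start_token_id == config.text_config.decoder_start_token_id

    # ... and the top-level initializer_range is pushed down into both sub-configs
    assert config.text_config.initializer_range == 0.05
    assert config.vision_config.initializer_range == 0.05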
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/fsmt/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig"], "tokenization_fsmt": ["FSMTTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"] if TYPE_CHECKING: from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig from .tokenization_fsmt import FSMTTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/fsmt/modeling_fsmt.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19 # Authors: # - @alexeib Alexei Baevski # - @edunov Sergey Edunov # - @michaelauli Michael Auli # - @myleott Myle Ott # - @nng555 Nathan Ng # - David Grangier # - Kyra Yee # # Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616 # """PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19""" import math from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from torch.nn import CrossEntropyLoss, LayerNorm from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_fsmt import FSMTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en" _CONFIG_FOR_DOC = "FSMTConfig" # See all FSMT models at https://huggingface.co/models?filter=fsmt # Porting notes: # this one is modeled after BartModel* # # Currently only translation (fairseq also has weights for LM) # # fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported. # - ru-en, en-ru use asymmetric vocab # - de-en, en-de use a merged single vocab (but the code works as if they are separate) # # Differences with Bart: # - not using bos token # - 2 separate vocabs (src and target) # - embed weights aren't tied # - uses a model Ensemble (but that part isn't ported/implemented yet) - so we # aren't getting as good of a BLEU score # - uses a projection layer at the end of the decoder # - doesn't use final_logits_bias # - beam search: stops as soon as num_beams == len(hypos) (whereas transformers # is not satisfied there and will continue searching until the next cycles # aren't promising something better), comparing BLEU scores - the transformers # algorithm is slightly superior, therefore using the latter. But if you want # to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``. # # SinusoidalPositionalEmbedding is slightly different from Bart's - generates # different embeddings. This implementation is copied verbatim from fairseq with # some small changes to make it work here. 
# # Other changes: # - doesn't support use_cache as Bart's version does # # # FSMTConfig changes with BartConfig # # Differences with BART: # - src/tgt vocabs aren't shared # - token embeddings aren't shared # - needs a language pair # - scale_embedding are True # # some unused args were removed too # # # TODO: # - port model ensemble (fs uses 4 model checkpoints) # - solve beam search discrepancies # docstyle-ignore """ Here is how to compare BLEU scores against fairseq implementation: # en-ru export PAIR=en-ru export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605) # ru-en export PAIR=ru-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937) # de-en export PAIR=de-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750) # en-de export PAIR=en-de export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862) """ FSMT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FSMTConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FSMT_GENERATION_EXAMPLE = r""" Translation example:: ```python >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration >>> mname = "facebook/wmt19-ru-en" >>> model = FSMTForConditionalGeneration.from_pretrained(mname) >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> src_text = "Машинное обучение - это здорово, не так ли?" >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) "Machine learning is great, isn't it?" ``` """ FSMT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def invert_mask(attention_mask): """Turns 1->0, 0->1, False->True, True-> False""" assert attention_mask.dim() == 2 return attention_mask.eq(0) def triu_onnx(x, diagonal=0): l = x.shape[0] arange = torch.arange(l, device=x.device) mask = arange.expand(l, l) arange = arange.unsqueeze(-1) if diagonal: arange = arange + diagonal mask = mask >= arange return x.masked_fill(mask == 0, 0) def _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32, ): """ Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks. 
Note: this is not called during generation """ pad_token_id = config.pad_token_id if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, pad_token_id) bsz, tgt_len = decoder_input_ids.size() if decoder_padding_mask is None: decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id) else: decoder_padding_mask = invert_mask(decoder_padding_mask) causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to( device=decoder_input_ids.device ) return decoder_input_ids, decoder_padding_mask, causal_mask class PretrainedFSMTModel(PreTrainedModel): config_class = FSMTConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, SinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs def _make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer # Helper Functions, mostly for making masks def _check_shapes(shape_1, shape2): if shape_1 != shape2: raise AssertionError(f"shape mismatch: {shape_1} != {shape2}") def shift_tokens_right(input_ids, pad_token_id): """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).""" # replace possible -100 values in labels by `pad_token_id` input_ids.masked_fill_(input_ids == -100, pad_token_id) prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens def make_padding_mask(input_ids, padding_idx=1): """True for pad tokens""" padding_mask = input_ids.eq(padding_idx) if not padding_mask.any(): padding_mask = None return padding_mask # Helper Modules class EncoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False): """ Args: x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)* encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape *(batch, src_len)* where padding elements are indicated by `1`. for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. 
Returns: encoded output of shape *(seq_len, batch, embed_dim)* """ residual = x x, attn_weights = self.self_attn( query=x, key=x, key_padding_mask=encoder_padding_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return x, attn_weights class FSMTEncoder(nn.Module): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`]. Args: config: FSMTConfig """ def __init__(self, config: FSMTConfig, embed_tokens): super().__init__() self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer] def forward( self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: torch.Tensor = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): """ Args: input_ids (`torch.LongTensor`): tokens in the source language of shape *(batch, src_len)* attention_mask (`torch.LongTensor`): indicating which indices are padding tokens inputs_embeds (`torch.FloatTensor`): embedding vectors of shape *(batch, src_len, embed_dim)* head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Returns: BaseModelOutput or Tuple comprised of: - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)* - **encoder_states** (`Tuple(torch.FloatTensor`)): all intermediate hidden states of shape *(src_len, batch, embed_dim)*. Only populated if *output_hidden_states:* is True. - **all_attentions** (`Tuple(torch.FloatTensor`)): Attention weights for each layer. During training might not be of length n_layers because of layer dropout. 
""" # check attention mask and invert if attention_mask is not None: attention_mask = invert_mask(attention_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_ids) elif inputs_embeds is not None: inputs_embeds = inputs_embeds * self.embed_scale # We assume zeros hidden states correspond to padding tokens # and create `position_ids` where inputs_embeds[:, :, 0] == 0 position_ids = inputs_embeds[:, :, 0].masked_fill( inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx ) embed_pos = self.embed_positions(position_ids) else: raise ValueError("You have to specify either input_ids or inputs_embeds") x = inputs_embeds + embed_pos x = nn.functional.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: x = x.transpose(0, 1) # T x B x C -> B x T x C encoder_states += (x,) x = x.transpose(0, 1) # B x T x C -> T x B x C # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer attn = None else: x, attn = encoder_layer( x, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) if output_attentions: all_attentions = all_attentions + (attn,) # T x B x C -> B x T x C x = x.transpose(0, 1) if output_hidden_states: encoder_states += (x,) if not return_dict: return tuple(v for v in [x, encoder_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions) class DecoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward( self, x, encoder_hidden_states, encoder_attn_mask=None, layer_state=None, causal_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, decoder_padding_mask=None, output_attentions=False, ): residual = x if layer_state is None: layer_state = {} # Self Attention x, self_attn_weights = self.self_attn( query=x, key=x, layer_state=layer_state, # adds keys to layer state key_padding_mask=decoder_padding_mask, attn_mask=causal_mask, 
layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) # Cross attention residual = x assert self.encoder_attn.cache_key != self.self_attn.cache_key x, cross_attn_weights = self.encoder_attn( query=x, key=encoder_hidden_states, key_padding_mask=encoder_attn_mask, layer_state=layer_state, # mutates layer state layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.encoder_attn_layer_norm(x) # Fully Connected residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return ( x, self_attn_weights, layer_state, cross_attn_weights, ) # layer_state = cache for decoding class FSMTDecoder(nn.Module): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`] Args: config: FSMTConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): super().__init__() self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer] if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None): embed_tokens_weight_shape = self.embed_tokens.weight.shape else: embed_tokens_weight_shape = self.embed_tokens.weight.shape self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False) self.output_projection.weight = self.embed_tokens.weight def forward( self, input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_padding_mask: torch.Tensor, decoder_padding_mask: torch.Tensor, decoder_causal_mask: torch.Tensor, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): """ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`): previous decoder outputs for teacher forcing encoder_hidden_states: output from the encoder, used for encoder-side attention encoder_padding_mask: for ignoring pad tokens past_key_values (dict or None): dictionary used for storing state during generation head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Returns: BaseModelOutputWithPast or tuple: - the decoder's features of shape *(batch, tgt_len, embed_dim)* - the cache - hidden states - attentions """ # check attention mask and invert if encoder_padding_mask is not None: encoder_padding_mask = invert_mask(encoder_padding_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: # embed positions positions = self.embed_positions(input_ids) if use_cache: input_ids = input_ids[:, -1:] positions = positions[:, -1:] # happens after we embed them x = self.embed_tokens(input_ids) * self.embed_scale elif inputs_embeds is not None: # We assume zeros hidden states correspond to padding tokens # and create `position_ids` where inputs_embeds[:, :, 0] == 0 position_ids = inputs_embeds[:, :, 0].masked_fill( inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx ) positions = self.embed_positions(position_ids) x = inputs_embeds * self.embed_scale else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") x += positions x = nn.functional.dropout(x, p=self.dropout, training=self.training) # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions else None next_decoder_cache = [] # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_state = past_key_values[idx] if past_key_values is not None else None x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer( x, encoder_hidden_states, encoder_attn_mask=encoder_padding_mask, decoder_padding_mask=decoder_padding_mask, layer_state=layer_state, causal_mask=decoder_causal_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), output_attentions=output_attentions, ) if use_cache: next_decoder_cache.append(layer_past.copy()) if output_attentions: all_self_attns += (layer_self_attn,) all_cross_attns += (layer_cross_attn,) # add hidden states from the last decoder layer if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) x = self.output_projection(x) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns, ) def _reorder_buffer(attn_cache, new_order): for k, input_buffer_k in attn_cache.items(): if input_buffer_k is not None: attn_cache[k] = input_buffer_k.index_select(0, new_order) return attn_cache class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim, num_heads, dropout=0.0, bias=True, encoder_decoder_attention=False, # otherwise self_attention ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim**-0.5 self.encoder_decoder_attention = encoder_decoder_attention self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self" def _shape(self, tensor, seq_len, bsz): return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) def forward( self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, layer_state: Optional[Dict[str, Optional[Tensor]]] = None, attn_mask: Optional[Tensor] = None, layer_head_mask: Optional[Tensor] = None, output_attentions=False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time(SeqLen) x Batch x Channel""" static_kv: bool = self.encoder_decoder_attention tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] # get here for encoder decoder cause of static_kv if layer_state is not None: # reuse k,v and encoder_padding_mask 
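# layer_state is the per-layer cache dict, keyed by cache_key ("self" for decoder
# self-attention, "encoder_decoder" for cross-attention). Each entry stores
# "prev_key"/"prev_value" of shape (bsz, num_heads, seq_len, head_dim) plus
# "prev_key_padding_mask"; it is re-written below once the new projections
# have been computed.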
saved_state = layer_state.get(self.cache_key, {}) if "prev_key" in saved_state and static_kv: # previous time steps are cached - no need to recompute key and value if they are static key = None else: saved_state = None layer_state = {} q = self.q_proj(query) * self.scaling if static_kv: if key is None: k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: k = self.k_proj(query) v = self.v_proj(query) q = self._shape(q, tgt_len, bsz) if k is not None: k = self._shape(k, -1, bsz) if v is not None: v = self._shape(v, -1, bsz) if saved_state is not None: k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz) # Update cache layer_state[self.cache_key] = { "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim), "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim), "prev_key_padding_mask": key_padding_mask if not static_kv else None, } assert k is not None src_len = k.size(1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # This is part of a workaround to get around fork/join parallelism not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None assert key_padding_mask is None or key_padding_mask.size()[:2] == ( bsz, src_len, ) if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2) attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # make sure that attn_weights are included in graph attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training, ) assert v is not None attn_output = torch.bmm(attn_probs, v) assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz): # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) 
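# For cross-attention (static_kv) the cached key/value tensors are simply reused,
# since the encoder output does not change between decoding steps; for decoder
# self-attention the freshly projected step is concatenated along the time axis.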
if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1) else: new_key_padding_mask = key_padding_mask return k, v, new_key_padding_mask def fill_with_neg_inf(t): """FP16-compatible function that fills a input_ids with -inf.""" return t.float().fill_(torch.finfo(t.dtype).min).type_as(t) # Public API def _get_shape(t): return getattr(t, "shape", None) @add_start_docstrings( "The bare FSMT Model outputting raw hidden-states without any specific head on top.", FSMT_START_DOCSTRING, ) class FSMTModel(PretrainedFSMTModel): _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"] def __init__(self, config: FSMTConfig): super().__init__(config) padding_idx = config.pad_token_id encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx) decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx) self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings()) self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings()) @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: if decoder_input_ids is None: use_cache = False output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # make masks if user doesn't supply if not use_cache and input_ids is not None: decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs( self.config, input_ids, decoder_input_ids=decoder_input_ids, decoder_padding_mask=decoder_attention_mask, causal_mask_dtype=self.decoder.embed_tokens.weight.dtype, ) else: decoder_padding_mask, causal_mask = None, None if 
decoder_input_ids is None and decoder_inputs_embeds is None: raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.") if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) decoder_outputs = self.decoder( decoder_input_ids, encoder_outputs[0], attention_mask, decoder_padding_mask, decoder_causal_mask=causal_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.encoder.embed_tokens def set_input_embeddings(self, value): self.encoder.embed_tokens = value def get_output_embeddings(self): return self.decoder.embed_tokens def set_output_embeddings(self, value): self.decoder.embed_tokens = value @add_start_docstrings( "The FSMT Model with a language modeling head. 
Can be used for summarization.", FSMT_START_DOCSTRING ) class FSMTForConditionalGeneration(PretrainedFSMTModel): base_model_prefix = "model" _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"] def __init__(self, config: FSMTConfig): super().__init__(config) base_model = FSMTModel(config) self.model = base_model # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(FSMT_GENERATION_EXAMPLE) def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.model( input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = outputs[0] masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # TODO(SS): do we need to ignore pad tokens in labels? masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): return { "input_ids": None, # encoder_outputs is defined. 
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = [] for layer_past in past_key_values: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def get_output_embeddings(self): return self.model.decoder.embed_tokens def set_output_embeddings(self, value): self.model.decoder.embed_tokens = value class SinusoidalPositionalEmbedding(nn.Embedding): """ This module produces sinusoidal positional embeddings of any length. We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge. Padding symbols are ignored. These embeddings get automatically extended in forward if more positions is needed. """ def __init__(self, num_positions, embedding_dim, padding_idx): self.make_weight(num_positions, embedding_dim, padding_idx) def make_weight(self, num_positions, embedding_dim, padding_idx): weight = self.get_embedding(num_positions, embedding_dim, padding_idx) if not hasattr(self, "weight"): # in ___init__ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight) else: # in forward put the weights on the correct dtype and device of the param weight = weight.to(dtype=self.weight.dtype, device=self.weight.device) self.weight = nn.Parameter(weight) self.weight.detach_() self.weight.requires_grad = False @staticmethod def get_embedding(num_embeddings, embedding_dim, padding_idx): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb @staticmethod def make_positions(tensor, padding_idx: int): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. 
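# Worked example (illustrative, padding_idx = 1):
#   tensor            = [[5, 7, 9, 1, 1]]
#   mask              = [[1, 1, 1, 0, 0]]
#   cumsum(mask)*mask = [[1, 2, 3, 0, 0]]
#   + padding_idx     = [[2, 3, 4, 1, 1]]
# so real tokens are numbered from padding_idx + 1 while pad slots keep the value
# padding_idx, which indexes the all-zero row written in get_embedding().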
mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input.shape[:2] max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weight.size(0): # expand embeddings if needed self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions)
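# A small usage sketch for the class above (comments only, so nothing executes on
# import; the sizes are arbitrary):
#
#     import torch
#
#     pos_emb = SinusoidalPositionalEmbedding(num_positions=10, embedding_dim=8, padding_idx=1)
#     input_ids = torch.tensor([[5, 7, 9, 1, 1]])   # 1 == padding_idx
#     out = pos_emb(input_ids)                      # shape (1, 5, 8); pad slots get the zero row
#     longer = torch.tensor([[5] * 20])
#     out = pos_emb(longer)                         # weight is transparently re-built because
#                                                   # 20 + padding_idx + 1 exceeds the current size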
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/fsmt/tokenization_fsmt.py
# coding=utf-8 # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for FSMT.""" import json import os import re import unicodedata from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "src_vocab_file": "vocab-src.json", "tgt_vocab_file": "vocab-tgt.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "src_vocab_file": { "stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-src.json" }, "tgt_vocab_file": { "stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-tgt.json" }, "merges_file": {"stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/merges.txt"}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"stas/tiny-wmt19-en-de": 1024} PRETRAINED_INIT_CONFIGURATION = { "stas/tiny-wmt19-en-de": { "langs": ["en", "de"], "model_max_length": 1024, "special_tokens_map_file": None, "full_tokenizer_file": None, } } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace(",", ",") text = re.sub(r"。\s*", ". ", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace(":", ":") text = text.replace("?", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace(")", ")") text = text.replace("!", "!") text = text.replace("(", "(") text = text.replace(";", ";") text = text.replace("1", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("0", "0") text = text.replace("3", "3") text = text.replace("2", "2") text = text.replace("5", "5") text = text.replace("6", "6") text = text.replace("9", "9") text = text.replace("7", "7") text = text.replace("8", "8") text = text.replace("4", "4") text = re.sub(r".\s*", ". 
", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) # Porting notes: # this one is modeled after XLMTokenizer # # added: # - src_vocab_file, # - tgt_vocab_file, # - langs, class FSMTTokenizer(PreTrainedTokenizer): """ Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization. - Normalizing all inputs text. - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like "__classify__") to a vocabulary. - The argument `langs` defines a pair of languages. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: langs (`List[str]`, *optional*): A list of two languages to translate from and to, for instance `["en", "ru"]`. src_vocab_file (`str`, *optional*): File containing the vocabulary for the source language. tgt_vocab_file (`st`, *optional*): File containing the vocabulary for the target language. merges_file (`str`, *optional*): File containing the merges. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, langs=None, src_vocab_file=None, tgt_vocab_file=None, merges_file=None, do_lower_case=False, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", **kwargs, ): try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." 
) self.sm = sacremoses self.src_vocab_file = src_vocab_file self.tgt_vocab_file = tgt_vocab_file self.merges_file = merges_file self.do_lower_case = do_lower_case # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = {} # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.cache_moses_detokenizer = {} if langs and len(langs) == 2: self.src_lang, self.tgt_lang = langs else: raise ValueError( f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. " "Usually that means that tokenizer can't find a mapping for the given model path " "in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer." ) with open(src_vocab_file, encoding="utf-8") as src_vocab_handle: self.encoder = json.load(src_vocab_handle) with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle: tgt_vocab = json.load(tgt_vocab_handle) self.decoder = {v: k for k, v in tgt_vocab.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( langs=langs, src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, **kwargs, ) # hack override def get_vocab(self) -> Dict[str, int]: return self.get_src_vocab() # hack override @property def vocab_size(self) -> int: return self.src_vocab_size def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer return self.cache_moses_punct_normalizer[lang].normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer return self.cache_moses_tokenizer[lang].tokenize( text, aggressive_dash_splits=True, return_str=False, escape=True ) def moses_detokenize(self, tokens, lang): if lang not in self.cache_moses_detokenizer: moses_detokenizer = self.sm.MosesDetokenizer(lang=lang) self.cache_moses_detokenizer[lang] = moses_detokenizer return self.cache_moses_detokenizer[lang].detokenize(tokens) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text @property def src_vocab_size(self): return len(self.encoder) @property def tgt_vocab_size(self): return len(self.decoder) def get_src_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def get_tgt_vocab(self): return dict(self.decoder, **self.added_tokens_decoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) 
== 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text, lang="en", bypass_tokenizer=False): """ Tokenize a string given language code using Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` Args: - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ # ignore `lang` which is currently isn't explicitly passed in tokenization_utils.py and always results in lang=en # if lang != self.src_lang: # raise ValueError(f"Expected lang={self.src_lang}, but got {lang}") lang = self.src_lang if self.do_lower_case: text = text.lower() if bypass_tokenizer: text = text.split() else: text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # remove BPE tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens] tokens = "".join(tokens).split() # detokenize text = self.moses_detokenize(tokens, self.tgt_lang) return text def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A FAIRSEQ Transformer sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] # no bos used in fairseq if token_ids_1 is None: return token_ids_0 + sep return token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # no bos used in fairseq if token_ids_1 is not None: return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ Transformer sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FAIRSEQ_TRANSFORMER sequence pair mask has the following format: """ sep = [self.sep_token_id] # no bos used in fairseq if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return src_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"] ) tgt_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"] ) merges_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") with open(tgt_vocab_file, "w", encoding="utf-8") as f: tgt_vocab = {v: k for k, v in self.decoder.items()} f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merges_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return src_vocab_file, tgt_vocab_file, merges_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/fsmt/configuration_fsmt.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FSMT configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP = {} class DecoderConfig(PretrainedConfig): r""" Configuration class for FSMT's decoder specific things. note: this is a private helper class """ model_type = "fsmt_decoder" def __init__(self, vocab_size=0, bos_token_id=0): super().__init__() self.vocab_size = vocab_size self.bos_token_id = bos_token_id class FSMTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FSMT [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: langs (`List[str]`): A list with source language and target_language (e.g., ['en', 'ru']). src_vocab_size (`int`): Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method in the encoder. tgt_vocab_size (`int`): Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method in the decoder. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        decoder_start_token_id (`int`, *optional*):
            This model starts decoding with `eos_token_id`.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether this is an encoder/decoder model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        num_beams (`int`, *optional*, defaults to 5):
            Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
            no beam search.
        length_penalty (`float`, *optional*, defaults to 1.0):
            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
            `length_penalty` < 0.0 encourages shorter sequences.
        early_stopping (`bool`, *optional*, defaults to `False`):
            Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
            when at least `num_beams` sentences are finished per batch or not.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
Examples: ```python >>> from transformers import FSMTConfig, FSMTModel >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration >>> config = FSMTConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = FSMTModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fsmt" attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} # update the defaults from config file def __init__( self, langs=["en", "de"], src_vocab_size=42024, tgt_vocab_size=42024, activation_function="relu", d_model=1024, max_length=200, max_position_embeddings=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, encoder_layerdrop=0.0, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, is_encoder_decoder=True, scale_embedding=True, tie_word_embeddings=False, num_beams=5, length_penalty=1.0, early_stopping=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **common_kwargs, ): self.langs = langs self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id) if "decoder" in common_kwargs: del common_kwargs["decoder"] self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, max_length=max_length, num_beams=num_beams, length_penalty=length_penalty, early_stopping=early_stopping, **common_kwargs, )
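

# A short sketch, not part of the configuration module above, showing how the separate source/target vocabularies and
# the private `DecoderConfig` helper surface on a configuration instance. The vocabulary sizes below are made up for
# illustration.
if __name__ == "__main__":
    config = FSMTConfig(langs=["en", "ru"], src_vocab_size=31000, tgt_vocab_size=31000)

    print(config.langs)  # ['en', 'ru']
    print(config.src_vocab_size, config.tgt_vocab_size)  # 31000 31000
    # The nested decoder config mirrors the target vocabulary and, as in fairseq, starts generation from the EOS id.
    print(config.decoder.vocab_size)  # 31000
    print(config.decoder.bos_token_id)  # 2, i.e. config.eos_token_id with the defaults
    # `attribute_map` aliases the generic config names onto the FSMT-specific attributes.
    print(config.hidden_size == config.d_model)  # True
    print(config.num_attention_heads == config.encoder_attention_heads)  # True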
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() json_indent = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model best_score_hparams = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names org_names = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: org_names[m] = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: org_names[m] = "allenai" def rewrite_dict_keys(d): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()) keep_keys = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del d2[f"{k}</w>"] d2[k] = d[k] # restore return d2 def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path): # prep assert os.path.exists(fsmt_checkpoint_path) os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f"Writing results to {pytorch_dump_folder_path}") # handle various types of models checkpoint_file = basename(fsmt_checkpoint_path) fsmt_folder_path = 
dirname(fsmt_checkpoint_path) cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel models = cls.hub_models() kwargs = {"bpe": "fastbpe", "tokenizer": "moses"} data_name_or_path = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"using checkpoint {checkpoint_file}") chkpt = hub_utils.from_pretrained( fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs ) args = vars(chkpt["args"]["model"]) src_lang = args["source_lang"] tgt_lang = args["target_lang"] data_root = dirname(pytorch_dump_folder_path) model_dir = basename(pytorch_dump_folder_path) # dicts src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt") tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt") src_dict = Dictionary.load(src_dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json") print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records") with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab do_lower_case = True for k in src_vocab.keys(): if not k.islower(): do_lower_case = False break tgt_dict = Dictionary.load(tgt_dict_file) tgt_vocab = rewrite_dict_keys(tgt_dict.indices) tgt_vocab_size = len(tgt_vocab) tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json") print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records") with open(tgt_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent)) # merges_file (bpecodes) merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"]) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" fsmt_merges_file = os.path.join(fsmt_folder_path, fn) if os.path.exists(fsmt_merges_file): break with open(fsmt_merges_file, encoding="utf-8") as fin: merges = fin.read() merges = re.sub(r" \d+$", "", merges, 0, re.M) # remove frequency number print(f"Generating {merges_file}") with open(merges_file, "w", encoding="utf-8") as fout: fout.write(merges) # model config fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json") # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}" model_conf = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": tgt_vocab_size, "langs": [src_lang, tgt_lang], 
"encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], "decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_all_embeddings"], } # good hparam defaults to start with model_conf["num_beams"] = 5 model_conf["early_stopping"] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"] else: model_conf["length_penalty"] = 1.0 print(f"Generating {fsmt_model_config_file}") with open(fsmt_model_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) # tokenizer config fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = { "langs": [src_lang, tgt_lang], "model_max_length": 1024, "do_lower_case": do_lower_case, } print(f"Generating {fsmt_tokenizer_config_file}") with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) # model model = chkpt["models"][0] model_state_dict = model.state_dict() # rename keys to start with 'model.' model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items()) # remove unneeded keys ignore_keys = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(k, None) config = FSMTConfig.from_pretrained(pytorch_dump_folder_path) model_new = FSMTForConditionalGeneration(config) # check that it loads ok model_new.load_state_dict(model_state_dict, strict=False) # save pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f"Generating {pytorch_weights_dump_path}") torch.save(model_state_dict, pytorch_weights_dump_path) print("Conversion is done!") print("\nLast step is to upload the files to s3") print(f"cd {data_root}") print(f"transformers-cli upload {model_dir}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ProphetNet checkpoint.""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging logger = logging.get_logger(__name__) logging.set_verbosity_info() def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str): """ Copy/paste/tweak prohpetnet's weights to our prophetnet structure. """ if "xprophetnet" in prophetnet_checkpoint_path: prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) else: prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) special_keys = ["key_proj", "value_proj", "query_proj"] mapping = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: attributes = key.split(".") if attributes[0] == "lm_head": model = prophet old_model = prophet_old else: model = prophet.prophetnet old_model = prophet_old.model is_key_init = False for attribute in attributes: if attribute in mapping: old_attribute = mapping[attribute] if not hasattr(old_model, old_attribute) and len(old_attribute) > 0: old_attribute = attribute elif hasattr(old_model, attribute): old_attribute = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" model.weight = old_model.weight logger.info(f"{attribute} is initialized.") is_key_init = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # sanity-check that the fused projection slices match the standalone projection shapes
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
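

# A self-contained sketch, not part of the original conversion script, of the fused-projection split performed above
# for the `special_keys` attributes: old fairseq-style attention stores the query/key/value projections in a single
# `in_proj_weight`/`in_proj_bias`, which the conversion slices into three equal chunks. The hidden size is arbitrary.
def _demo_split_in_proj(hidden_size: int = 8):
    import torch

    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    in_proj_bias = torch.randn(3 * hidden_size)
    embed_dim = in_proj_weight.shape[0] // 3

    # query / key / value occupy the first, second and third thirds of the fused parameters
    query_w, key_w, value_w = (
        in_proj_weight[:embed_dim, :],
        in_proj_weight[embed_dim : 2 * embed_dim, :],
        in_proj_weight[2 * embed_dim :, :],
    )
    query_b, key_b, value_b = (
        in_proj_bias[:embed_dim],
        in_proj_bias[embed_dim : 2 * embed_dim],
        in_proj_bias[2 * embed_dim :],
    )
    # Each slice now has the shape of a standalone nn.Linear(hidden_size, hidden_size) weight/bias.
    assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
    assert query_b.shape == key_b.shape == value_b.shape == (hidden_size,)
    return (query_w, query_b), (key_w, key_b), (value_w, value_b)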
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/prophetnet/modeling_prophetnet.py
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version).""" import copy import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import LayerNorm from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_prophetnet import ProphetNetConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ProphenetConfig" _CHECKPOINT_FOR_DOC = "microsoft/prophetnet-large-uncased" PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/prophetnet-large-uncased", # See all ProphetNet models at https://huggingface.co/models?filter=prophetnet ] PROPHETNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`. This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`ProphetNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PROPHETNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def softmax(hidden_state, dim, onnx_trace=False): if onnx_trace: return nn.functional.softmax(hidden_state.float(), dim=dim) else: return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32) def ngram_attention_bias(sequence_length, ngram, device, dtype): """ This function computes the bias for the predict stream """ left_block = ( torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min ) right_block = left_block.detach().clone() # create bias for stream_idx in range(ngram): right_block[stream_idx].fill_diagonal_(0, wrap=False) left_block[stream_idx].triu_(-stream_idx + 1) left_block[:, :, 0] = 0 return torch.cat([left_block, right_block], dim=2) def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False): """ This function computes individual parts of the relative position buckets. For more detail, see paper. """ inv_relative_positions = -relative_positions rel_positions_bucket = 0 if is_bidirectional: num_buckets = num_buckets // 2 rel_positions_bucket = ( rel_positions_bucket + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets ) inv_relative_positions = torch.abs(inv_relative_positions) else: inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions)) max_exact = num_buckets // 2 is_small = torch.lt(inv_relative_positions, max_exact) val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log( max_distance / max_exact ) * (num_buckets - max_exact) val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int() rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large) return rel_positions_bucket def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids): """ This function computes both main and predict relative position buckets. For more detail, see paper. 
""" # main stream main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1) main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1) # predicting stream predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1) predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1) predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1) # get both position buckets main_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False ) predict_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False ) return main_relative_position_buckets, predict_relative_position_buckets @dataclass class ProphetNetSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, encoder_sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn( "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`" " instead.", FutureWarning, ) return self.cross_attentions @dataclass class ProphetNetSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. 
last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size,ngram * decoder_sequence_length, config.vocab_size)`, *optional*): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, encoder_sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn( "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`" " instead.", FutureWarning, ) return self.cross_attentions @dataclass class ProphetNetDecoderModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the """ last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ProphetNetDecoderLMOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. 
ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None class ProphetNetPreTrainedModel(PreTrainedModel): config_class = ProphetNetConfig base_model_prefix = "prophetnet" supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the" " pad_token_id. See ProphetNet docs for more information" ) # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" return shifted_input_ids class ProphetNetPositionalEmbeddings(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, config: ProphetNetConfig) -> None: self.max_length = config.max_position_embeddings super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id) def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): assert (position_ids is None) or ( self.padding_idx is None ), "If position_ids is pre-computed then padding_idx should not be set." if position_ids is None: if past_key_values is not None: # position_ids is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX prev_num_input_ids = past_key_values[0][0].shape[2] num_input_ids = inputs_shape[1] + prev_num_input_ids position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * ( int(self.padding_idx + num_input_ids) ) else: if attention_mask is None: attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) # retrieve position_ids from input_ids / attention_mask position_ids = ( torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask ).long() + self.padding_idx # make sure position_ids are not bigger then max_length position_ids = position_ids.clamp(0, self.max_length - 1) return super().forward(position_ids), position_ids def _forward(self, position_ids): return super().forward(position_ids) class ProphetNetAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: ProphetNetConfig, num_attn_heads: int, ): super().__init__() hidden_size = config.hidden_size self.attention_dropout = config.attention_dropout self.dropout = config.dropout self.num_attn_heads = num_attn_heads self.head_dim = hidden_size // num_attn_heads assert self.head_dim * num_attn_heads == hidden_size, ( "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and" " `config.num_decoder_attention_heads`" ) self.key_proj = nn.Linear(hidden_size, hidden_size) self.value_proj = nn.Linear(hidden_size, hidden_size) self.query_proj = nn.Linear(hidden_size, hidden_size) self.out_proj = nn.Linear(hidden_size, hidden_size) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states, key_value_states: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, layer_head_mask: Optional[Tensor] = None, past_key_value: Optional[Tuple[Tensor]] = None, output_attentions: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: batch_size, tgt_len, hidden_size = hidden_states.size() # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None assert list(hidden_states.size()) == [ batch_size, tgt_len, hidden_size, ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is 
{hidden_states.size()}" # previous time steps are cached - no need to recompute key and value if they are static query_states = self.query_proj(hidden_states) / (self.head_dim**0.5) if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.key_proj(key_value_states), -1, batch_size) value_states = self._shape(self.value_proj(key_value_states), -1, batch_size) else: # self_attention key_states = self._shape(self.key_proj(hidden_states), -1, batch_size) value_states = self._shape(self.value_proj(hidden_states), -1, batch_size) if is_cross_attention: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) # project states into the correct shape proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(2) attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3)) expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len) if attn_weights.size() != expected_shape: raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}") # This is part of a workaround to get around fork/join parallelism not supporting Optional types. if attention_mask is not None and attention_mask.dim() == 0: attention_mask = None expected_shape = (batch_size, self.num_attn_heads, 1, src_len) if attention_mask is not None and attention_mask.size() != expected_shape: raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}") if attention_mask is not None: # don't attend to padding symbols attn_weights = attn_weights + attention_mask if output_attentions: attn_weights_reshaped = attn_weights else: attn_weights_reshaped = None attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), ( f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( batch_size, self.num_attn_heads, tgt_len, src_len ) # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped attn_probs = nn.functional.dropout( attn_weights, p=self.attention_dropout, training=self.training, ) attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states) expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim) if attn_output.size() != expected_shape: raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}") attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size) attn_output = self.out_proj(attn_output) attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return attn_output, attn_weights_reshaped, past_key_value class 
ProphetNetFeedForward(nn.Module): """ This is the residual two feed-forward layer block based on the original Transformer implementation. """ def __init__(self, config: ProphetNetConfig, ffn_dim: int): super().__init__() self.activation_fn = ACT2FN[config.activation_function] self.intermediate = nn.Linear(config.hidden_size, ffn_dim) self.output = nn.Linear(ffn_dim, config.hidden_size) self.activation_dropout = config.activation_dropout self.dropout = config.dropout def forward(self, hidden_states): hidden_states = self.intermediate(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.output(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ProphetNetNgramSelfAttention(nn.Module): def __init__(self, config: ProphetNetConfig): super().__init__() self.hidden_size = config.hidden_size self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.num_attn_heads = config.num_decoder_attention_heads self.dropout = config.dropout self.attention_dropout = config.attention_dropout self.head_dim = config.hidden_size // self.num_attn_heads self.ngram = config.ngram assert ( self.head_dim * self.num_attn_heads == config.hidden_size ), "config.hidden_size must be divisible by num_attn_heads" # key, value, query projection self.key_proj = nn.Linear(config.hidden_size, config.hidden_size) self.value_proj = nn.Linear(config.hidden_size, config.hidden_size) self.query_proj = nn.Linear(config.hidden_size, config.hidden_size) # out projection self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) # rel position embeddings self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads) # for onnx runtime self.onnx_trace = False def _shape(self, tensor, seq_len, batch_size): return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, hidden_states, past_key_value: Optional[Tuple[Tensor]] = None, attention_mask=None, layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, ): batch_size, ngram_sequence_length, hidden_size = hidden_states.size() assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], ( f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape" f" {hidden_states.shape}" ) # project query_states = self.query_proj(hidden_states) key_states = self.key_proj(hidden_states) value_states = self.value_proj(hidden_states) # normalize query_states = query_states / (self.head_dim**0.5) # reshape query_states = self._shape(query_states, ngram_sequence_length, batch_size) key_states = self._shape(key_states, -1, batch_size) value_states = self._shape(value_states, -1, batch_size) proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) query_states = query_states.view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) # chunk into main stream and predict stream hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1) query_states_list = query_states.chunk(1 + self.ngram, dim=2) key_states_list = key_states.chunk(1 + self.ngram, dim=2) 
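        # Editor's note (illustrative shapes, assuming config.ngram == 2): for a main-stream length of S,
        # `hidden_states` arrives here as the concatenation of the main stream and the `ngram` predict streams,
        # i.e. `ngram_sequence_length == 3 * S`. The `chunk(1 + self.ngram, ...)` calls in this block therefore
        # split each projection back into [main, 1st-gram predict, 2nd-gram predict] pieces of length S each.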
value_states_list = value_states.chunk(1 + self.ngram, dim=2) main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:] main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:] main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:] main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:] # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim) if past_key_value is not None: prev_main_key_states = past_key_value[0] main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2) prev_main_value_states = past_key_value[1] main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2) # Update cache past_key_value = (main_key_states, main_value_states) # get seq_length of main stream only sequence_length = ngram_sequence_length // (1 + self.ngram) # MAIN-STREAM # main attn weights # [batch_size, number_heads, sequence_length, head_dimesion] # x [batch_size, number_heads, head_dimesion, sequence_length] # -> [batch_size, number_heads, sequence_length, sequence_length] main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3)) # retrieve relative position embeddings for each layer -> see paper for more details main_relative_pos_embeddings = self.get_main_relative_pos_embeddings( main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets ) main_attn_weights = main_attn_weights + main_relative_pos_embeddings if attention_mask is not None: main_attn_weights = main_attn_weights + attention_mask main_attn_probs = softmax( main_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(main_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), ( f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" f" {layer_head_mask.size()}" ) main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view( batch_size, self.num_attn_heads, -1, sequence_length ) main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training) # project to attn_output # [batch_size, number_heads, sequence_length, sequence_length] # x [batch_size, number_heads, sequence_length, head_dimesion] # -> [batch_size, number_heads, sequence_length, head_dimesion] main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states) # reshape so that num_heads dim is merged into last `head_dim` axis main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size) main_attn_output = self.out_proj(main_attn_output) # PREDICT-STREAM # [batch_size, ngram, number_heads, sequence_length, head_dimesion] predict_query_states = torch.stack(predict_query_states_list, 1).view( batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim ) # [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1) # [batch_size, sequence_length, ngram, hidden_size] predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2) # [batch_size, number_heads, ngram, 2*sequence_length, head_dimesion] predict_value_states = torch.cat( [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2 ) # [batch_size, ngram, number_heads, sequence_length, 
head_dimesion] # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states)) # retrieve relative position embeddings for each layer -> see paper for more details # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings] predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings( predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets ) # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings if extended_predict_attention_mask is not None: # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4) extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype) predict_attn_weights = predict_attn_weights + extended_predict_attention_mask predict_attn_probs = softmax( predict_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(predict_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), ( f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" f" {layer_head_mask.size()}" ) predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs predict_attn_probs = nn.functional.dropout( predict_attn_probs, p=self.attention_dropout, training=self.training ) # project to attention output # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] # -> [batch_size, ngram, number_heads, sequence_length, head_dimesion] predict_attn_output = torch.einsum( "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2)) ) # reshape so that num_heads dim is merged into last `head_dim` axis # [batch_size, ngram, number_heads, sequence_length, head_dimesion] -> [batch_size, ngram, sequence_length, hidden_size] predict_attn_output = predict_attn_output.transpose(2, 3) predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size) predict_attn_output = self.out_proj(predict_attn_output) # concat to single attn output # [batch_size, (1+ngram)*sequence_length, hidden_size] attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size) # reshape into better form for `config.output_attentions` main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1) attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return attn_output, main_attn_probs, predict_attn_probs, past_key_value def get_main_relative_pos_embeddings( self, hidden_states, attn_weights, position_ids, main_relative_position_buckets ): # input hidden_states [batch_size, sequence_length, hidden_size] # input attn_weights [batch_size, num_heads, sequence_length, sequence_length] # input position_ids [batch_size, sequence_length] or [1,1] batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len) if main_relative_position_buckets is None: batch_size, sequence_length = hidden_states.shape[:2] relative_positions = 
( torch.arange(1, attn_weights.shape[-1] + 1) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) # [batch_size, sequence_length, sequence_length+1] relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) main_relative_position_buckets = compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) # [batch_size, sequence_length, num_buckets * num_heads] rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) rel_pos_embeddings = rel_pos_embeddings.view( rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads) ) rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2) # [batch_size, num_heads, sequence_length, num_buckets] rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,)) main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) # [batch_size * num_heads * sequence_length, sequence_length] main_relative_position_buckets = main_relative_position_buckets.view( -1, main_relative_position_buckets.shape[-1] ) main_relative_position_buckets = main_relative_position_buckets.long() # [batch_size * num_heads * sequence_length, sequence_length] rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets) main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1) return main_relative_pos_embeddings def get_predict_relative_pos_embeddings( self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets ): # input hidden_states [batch_size, sequence_length, ngram, hidden_size] # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length] # input position_ids [batch_size, sequence_length] or [1,1] # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None batch_size, sequence_length = hidden_states.shape[0:2] if predict_relative_position_buckets is None: key_sequence_length = attn_weights.shape[-1] assert ( position_ids[0][0] == key_sequence_length - 1 ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... 
(key_sequence_length - 1)" relative_positions = ( torch.arange(0, key_sequence_length) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) predict_relative_position_buckets = compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) # [batch_size, ngram, sequence_length, hidden_size] hidden_states = hidden_states.transpose(1, 2) rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) # [batch_size, ngram, sequence_length, num_buckets, num_heads] rel_pos_embeddings = rel_pos_embeddings.view( hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads) ) rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3) # [batch_size * ngram * sequence_length * num_heads, num_buckets] rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets) # [ngram, batch_size, num_heads * sequence_length, -1] predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0) predict_relative_position_buckets = predict_relative_position_buckets.repeat( self.ngram, 1, self.num_attn_heads, 1 ) # [ngram * batch_size * num_heads * sequence_length, -1] predict_relative_position_buckets = predict_relative_position_buckets.view( -1, predict_relative_position_buckets.size(-1) ).long() predict_relative_pos_embeddings = torch.gather( rel_pos_embeddings, dim=1, index=predict_relative_position_buckets ) # [batch_size, gram, num_heads, sequence_length, -1] predict_relative_pos_embeddings = predict_relative_pos_embeddings.view( batch_size, self.ngram, self.num_attn_heads, sequence_length, -1 ) return predict_relative_pos_embeddings class ProphetNetEncoderLayer(nn.Module): """ Encoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig): super().__init__() # 1st residual block self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads) self.self_attn_layer_norm = LayerNorm(config.hidden_size) # 2nd residual block self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) def forward( self, hidden_states, attention_mask, layer_head_mask, output_attentions: bool = False, ): # 1st residual block attention_output, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.self_attn_layer_norm(attention_output + hidden_states) # 2nd residual block feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ProphetNetDecoderLayer(nn.Module): """ Decoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig): super().__init__() # 1st residual block self.self_attn = ProphetNetNgramSelfAttention(config) self.self_attn_layer_norm = LayerNorm(config.hidden_size) # 2nd residual block if config.add_cross_attention: self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads) self.cross_attn_layer_norm = LayerNorm(config.hidden_size) # 3rd residual block self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) def forward( self, hidden_states, attention_mask=None, 
encoder_hidden_states=None, encoder_attn_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, past_key_value=None, use_cache: bool = True, output_attentions: bool = False, ): # 1st residual block # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, ) hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attn_weights = None if encoder_hidden_states is not None: # 2nd residual block attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attn_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # 3rd residual block feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs @add_start_docstrings( "The standalone encoder part of the ProphetNetModel.", PROPHETNET_START_DOCSTRING, ) class ProphetNetEncoder(ProphetNetPreTrainedModel): r""" word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. 
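
    Example (editor's sketch, not part of the original docstring; it reuses the `microsoft/prophetnet-large-uncased`
    checkpoint assumed by the other examples in this file and builds a randomly initialized encoder that shares an
    embedding table):

    ```python
    >>> import torch
    >>> from torch import nn

    >>> from transformers import ProphetNetConfig, ProphetNetEncoder

    >>> config = ProphetNetConfig.from_pretrained("microsoft/prophetnet-large-uncased")
    >>> shared_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
    >>> encoder = ProphetNetEncoder(config, word_embeddings=shared_embeddings)  # weights are randomly initialized

    >>> input_ids = torch.tensor([[101, 7592, 102]])  # illustrative token ids
    >>> last_hidden_state = encoder(input_ids=input_ids).last_hidden_state
    ```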
""" def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None): super().__init__(config) self.word_embeddings = ( word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) ) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, ProphetNetEncoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("Either input_ids or inputs_embeds has to be passed.") elif input_ids is not None and inputs_embeds is not None: raise ValueError("Make sure to only pass input_ids or inputs_embeds.") elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # prepare attention mask if attention_mask is not None: extended_attention_mask = ( 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1) ) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) else: extended_attention_mask = None position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device) hidden_states = inputs_embeds + position_embeddings hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training) encoder_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, extended_attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask=extended_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions ) @add_start_docstrings( "The standalone decoder part of the ProphetNetModel.", PROPHETNET_START_DOCSTRING, ) class ProphetNetDecoder(ProphetNetPreTrainedModel): r""" word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. """ def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None): super().__init__(config) self.ngram = config.ngram self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.dropout = config.dropout self.max_target_positions = config.max_position_embeddings self.word_embeddings = ( word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) ) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None) self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ProphetNetDecoderModelOutput]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: Example: ```python >>> from transformers import AutoTokenizer, ProphetNetDecoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.") elif input_ids is not None and inputs_embeds is not None: raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.") elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) batch_size, sequence_length = inputs_embeds.shape[:2] main_stream_pos_embed, position_ids = self.position_embeddings( (batch_size, sequence_length), device=inputs_embeds.device, past_key_values=past_key_values, ) if past_key_values is not None: main_relative_position_buckets, predict_relative_position_buckets = None, None else: ( main_relative_position_buckets, predict_relative_position_buckets, ) = self.compute_buffered_relative_buckets(position_ids) predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1) # add position embeddings hidden_states = inputs_embeds + main_stream_pos_embed ngram_embeddings = self.ngram_embeddings.weight # prepare attention mask if past_key_values is not None: assert ( hidden_states.size(1) == 1 ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1" ngram_hidden_states = [ (ngram_embeddings[ngram - 1] + 
predicting_stream_pos_embed).repeat(batch_size, 1, 1) for ngram in range(self.ngram) ] extended_attention_mask = None extended_predict_attention_mask = None else: ngram_hidden_states = [ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram) ] extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask) extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask) # prepare encoder attention mask if encoder_attention_mask is not None: extended_encoder_attention_mask = ( 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1) ) * torch.finfo(self.dtype).min extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype) else: extended_encoder_attention_mask = None hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1) if self.embeddings_layer_norm: hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # init attentions, hidden_states and cache with empty tuples all_main_stream_hidden_states = () if output_hidden_states else None all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None all_main_stream_attns = () if output_attentions else None all_ngram_stream_attns = () if output_attentions else None all_cross_attns = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False present_key_values = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: # grad cannot be kept because tensor is sliced all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, extended_attention_mask, encoder_hidden_states, extended_encoder_attention_mask, (head_mask[idx] if head_mask is not None else None), (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), extended_predict_attention_mask, main_relative_position_buckets, predict_relative_position_buckets, position_ids, None, use_cache, output_attentions, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=extended_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attn_mask=extended_encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if use_cache: present_key_values += (layer_outputs[4 if output_attentions else 1],) if output_attentions: all_main_stream_attns += (layer_outputs[1],) all_ngram_stream_attns += (layer_outputs[2],) if self.config.add_cross_attention: all_cross_attns += (layer_outputs[3],) if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) # split last_hidden_state for return last_hidden_state = hidden_states[:, :sequence_length] last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None if not return_dict: return tuple( v for v in [ last_hidden_state, last_hidden_state_ngram, present_key_values, all_main_stream_hidden_states, all_ngram_stream_hidden_states, all_main_stream_attns, all_ngram_stream_attns, all_cross_attns, ] if v is not None ) return ProphetNetDecoderModelOutput( last_hidden_state=last_hidden_state, last_hidden_state_ngram=last_hidden_state_ngram, past_key_values=present_key_values, hidden_states=all_main_stream_hidden_states, hidden_states_ngram=all_ngram_stream_hidden_states, attentions=all_main_stream_attns, ngram_attentions=all_ngram_stream_attns, cross_attentions=all_cross_attns, ) def compute_buffered_relative_buckets(self, position_ids): batch_size, sequence_length = position_ids.shape position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1) main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets( self.num_buckets, self.relative_max_distance, position_ids ) # buffer relative buckets main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1) predict_relative_buckets = torch.cat( [ predict_relative_buckets[:, :sequence_length, :sequence_length], predict_relative_buckets[ :, :sequence_length, self.max_target_positions : self.max_target_positions 
+ sequence_length ], ], 2, ).repeat(batch_size, 1, 1) return main_relative_buckets, predict_relative_buckets def prepare_attention_mask(self, hidden_states, attention_mask): batch_size, seq_length = hidden_states.shape[:2] # get causal mask causal_mask = torch.full( (seq_length, seq_length), torch.finfo(hidden_states.dtype).min, dtype=hidden_states.dtype, device=hidden_states.device, ) causal_mask = torch.triu(causal_mask, 1) extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand( (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape ) # add usual attention mask if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_causal_mask + extended_attention_mask else: extended_attention_mask = extended_causal_mask return extended_attention_mask.to(hidden_states.dtype) def prepare_predict_attention_mask(self, hidden_states, attention_mask): batch_size, seq_length = hidden_states.shape[:2] # get causal mask predict_causal_mask = ngram_attention_bias( self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype ) predict_causal_mask = torch.cat( [ predict_causal_mask[:, :seq_length, :seq_length], predict_causal_mask[ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length ], ], dim=-1, ) extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand( (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape ) # add usual attention mask if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.expand( (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length) ) # predicted stream attention_mask should always be 0 extended_attention_mask = torch.cat( [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1 ) extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask else: extended_predict_attention_mask = extended_predict_causal_mask return extended_predict_attention_mask.to(hidden_states.dtype) @add_start_docstrings( "The bare ProphetNet Model outputting raw hidden-states without any specific head on top.", PROPHETNET_START_DOCSTRING, ) class ProphetNetModel(ProphetNetPreTrainedModel): _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"] def __init__(self, config: ProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) encoder_config = copy.deepcopy(config) encoder_config.is_encoder_decoder = False encoder_config.use_cache = False self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) 
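            # Editor's note: the encoder tie above and the decoder tie on the next line both point at the shared
            # `self.word_embeddings` table, so when `config.tie_word_embeddings` is True the input embeddings of
            # both sub-modules resolve to the same (or, under torchscript, a cloned) parameter.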
self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ProphetNetSeq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, ProphetNetModel >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return ProphetNetSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, 
cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.", PROPHETNET_START_DOCSTRING, ) class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel): _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"] def __init__(self, config: ProphetNetConfig): super().__init__(config) self.prophetnet = ProphetNetModel(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) def get_input_embeddings(self): return self.prophetnet.word_embeddings @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ProphetNetSeq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> logits_next_token = outputs.logits # logits to predict next token as usual >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... 
next tokens ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) outputs = self.prophetnet( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) batch_size, sequence_length = ( decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2] ) predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None # To use .view in loss computation, make sure that logits is contiguous. if not logits.is_contiguous(): logits = logits.contiguous() loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetSeq2SeqLMOutput( loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states, decoder_attentions=outputs.decoder_attentions, decoder_ngram_attentions=outputs.decoder_ngram_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def _compute_loss(self, logits, labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax( logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32, ) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean") if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation." if past_key_values: decoder_input_ids = decoder_input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": None, # encoder_outputs is defined. 
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) @staticmethod # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past def get_encoder(self): return self.prophetnet.encoder def get_decoder(self): return self.prophetnet.decoder @add_start_docstrings( "The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal" " language modeling.", PROPHETNET_START_DOCSTRING, ) class ProphetNetForCausalLM(ProphetNetPreTrainedModel): _tied_weights_keys = [ "prophetnet.word_embeddings.weight", "prophetnet.decoder.word_embeddings.weight", "lm_head.weight", ] def __init__(self, config: ProphetNetConfig): # set config for CLM config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.prophetnet = ProphetNetDecoderWrapper(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.prophetnet.decoder.word_embeddings def set_input_embeddings(self, value): self.prophetnet.decoder.word_embeddings = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) def set_decoder(self, decoder): self.prophetnet.decoder = decoder def get_decoder(self): return self.prophetnet.decoder @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ProphetNetDecoderLMOutput]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` Returns: Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased") >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # Model can also be used with EncoderDecoder framework >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer >>> import torch >>> tokenizer_enc = BertTokenizer.from_pretrained("bert-large-uncased") >>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained( ... "bert-large-uncased", "microsoft/prophetnet-large-uncased" ... ) >>> ARTICLE = ( ... "the us state department said wednesday it had received no " ... "formal word from bolivia that it was expelling the us ambassador there " ... "but said the charges made against him are `` baseless ." ... ) >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids >>> labels = tokenizer_dec( ... "us rejects charges against its ambassador in bolivia", return_tensors="pt" ... 
).input_ids >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) outputs = self.prophetnet.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetDecoderLMOutput( loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, hidden_states_ngram=outputs.hidden_states_ngram, attentions=outputs.attentions, ngram_attentions=outputs.ngram_attentions, cross_attentions=outputs.cross_attentions, ) def _compute_loss(self, logits, labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax( logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32, ) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean") if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, use_cache=None, **kwargs, ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. 
input_ids not needed "attention_mask": attention_mask, "head_mask": head_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel): """ This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet classes. """ def __init__(self, config: ProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings) # Initialize weights and apply final processing self.post_init() def _tie_weights(self): self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
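# --- Illustrative sketch (not part of the original file) ----------------------
# The `_compute_loss` method above tiles the gold labels across the `ngram`
# prediction streams, computes an NLL over all streams, and optionally mixes in
# label smoothing. The standalone function below re-derives that computation in
# a simplified form (it omits the `disable_ngram_loss` branch); the shapes and
# toy numbers are assumptions for illustration only.
import torch
import torch.nn.functional as F


def ngram_label_smoothed_loss(predict_logits, labels, ngram=2, eps=0.1, ignore_index=-100):
    # predict_logits: (batch, ngram, seq_len, vocab_size), labels: (batch, seq_len)
    expend_targets = labels.new_full((ngram, labels.size(0), labels.size(1)), ignore_index)
    for i in range(ngram):
        expend_targets[i] = labels  # every stream is supervised with the same gold tokens

    logits = predict_logits.transpose(0, 1).contiguous()  # (ngram, batch, seq_len, vocab_size)
    lprobs = F.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32)
    loss = F.nll_loss(lprobs, expend_targets.view(-1), ignore_index=ignore_index, reduction="mean")

    if eps > 0.0:
        # label smoothing: blend the NLL with the mean negative log-probability mass
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
        smooth_loss = smooth_loss[expend_targets.ne(ignore_index).view(-1)].mean()
        loss = (1.0 - eps) * loss + (eps / lprobs.size(-1)) * smooth_loss
    return loss


# toy check with made-up sizes: batch=1, ngram=2, seq_len=4, vocab_size=8
print(ngram_label_smoothed_loss(torch.randn(1, 2, 4, 8), torch.randint(0, 8, (1, 4))))
# -------------------------------------------------------------------------------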
hf_public_repos/transformers/src/transformers/models/prophetnet/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig"], "tokenization_prophetnet": ["ProphetNetTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_prophetnet"] = [ "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetForCausalLM", "ProphetNetForConditionalGeneration", "ProphetNetModel", "ProphetNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig from .tokenization_prophetnet import ProphetNetTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_prophetnet import ( PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
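# --- Illustrative sketch (not part of the original file) ----------------------
# The `_LazyModule` indirection above keeps `import transformers.models.prophetnet`
# cheap: the torch-dependent `modeling_prophetnet` submodule is only imported the
# first time one of its attributes is accessed. A minimal check of that behaviour,
# assuming `transformers` and `torch` are installed and nothing else has imported
# the modeling file yet:
import importlib
import sys

prophetnet = importlib.import_module("transformers.models.prophetnet")
modeling_name = "transformers.models.prophetnet.modeling_prophetnet"
print(modeling_name in sys.modules)   # expected: False -- not loaded yet

_ = prophetnet.ProphetNetForCausalLM  # attribute access triggers the real import
print(modeling_name in sys.modules)   # expected: True
# -------------------------------------------------------------------------------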
hf_public_repos/transformers/src/transformers/models/prophetnet/configuration_prophetnet.py
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ProphetNet model configuration""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/prophetnet-large-uncased": ( "https://huggingface.co/microsoft/prophetnet-large-uncased/resolve/main/config.json" ), } class ProphetNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ProphetNet [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ProphetNetModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. num_encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. num_decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. num_decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_cross_attention (`bool`, *optional*, defaults to `True`): Whether cross-attention layers should be added to the model. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. ngram (`int`, *optional*, defaults to 2) Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token. num_buckets (`int`, *optional*, defaults to 32) The number of buckets to use for each attention layer. This is for relative position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. relative_max_distance (`int`, *optional*, defaults to 128) Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. disable_ngram_loss (`bool`, *optional*, defaults to `False`): Whether be trained predicting only the next first token. eps (`float`, *optional*, defaults to 0.0): Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "prophetnet" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.num_encoder_layers = num_encoder_layers self.num_encoder_attention_heads = num_encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.num_decoder_layers = num_decoder_layers self.num_decoder_attention_heads = num_decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function # parameters for prophetnet self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = 
disable_ngram_loss self.eps = eps # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, ) @property def num_hidden_layers(self) -> int: return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
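# --- Illustrative sketch (not part of the original file) ----------------------
# A quick look at the configuration behaviour defined above, assuming
# `transformers` is installed: `num_hidden_layers` is a derived, read-only
# property (encoder + decoder layers) and `attribute_map` aliases
# `num_attention_heads` to `num_encoder_attention_heads`.
from transformers import ProphetNetConfig

config = ProphetNetConfig(num_encoder_layers=3, num_decoder_layers=5)
print(config.num_hidden_layers)    # 8 -> 3 encoder + 5 decoder layers
print(config.num_attention_heads)  # 16 -> resolved via attribute_map

try:
    config.num_hidden_layers = 12  # the property setter deliberately refuses this
except NotImplementedError as err:
    print(err)
# -------------------------------------------------------------------------------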
hf_public_repos/transformers/src/transformers/models/prophetnet/tokenization_prophetnet.py
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os import unicodedata from typing import Iterable, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/prophetnet-large-uncased": ( "https://huggingface.co/microsoft/prophetnet-large-uncased/resolve/main/prophetnet.tokenizer" ), } } PRETRAINED_INIT_CONFIGURATION = { "microsoft/prophetnet-large-uncased": {"do_lower_case": True}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/prophetnet-large-uncased": 512, } # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. 
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab class ProphetNetTokenizer(PreTrainedTokenizer): r""" Construct a ProphetNetTokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. 
x_sep_token (`str`, *optional*, defaults to `"[X_SEP]"`): Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is used to separate bullet-point like sentences in summarization, *e.g.*. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES # first name has to correspond to main model input name # to make sure `tokenizer.pad(...)` works correctly # `ProphetNet` doesn't have `token_type_ids` as argument. model_input_names: List[str] = ["input_ids", "attention_mask"] def __init__( self, vocab_file: str, do_lower_case: Optional[bool] = True, do_basic_tokenize: Optional[bool] = True, never_split: Optional[Iterable] = None, unk_token: Optional[str] = "[UNK]", sep_token: Optional[str] = "[SEP]", x_sep_token: Optional[str] = "[X_SEP]", pad_token: Optional[str] = "[PAD]", mask_token: Optional[str] = "[MASK]", tokenize_chinese_chars: Optional[bool] = True, strip_accents: Optional[bool] = None, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, x_sep_token=x_sep_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token: str): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index: int): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens: str): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: Optional[bool] = False, ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ProphetNet sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. 
token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep
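# --- Illustrative sketch (not part of the original file) ----------------------
# As `build_inputs_with_special_tokens` above shows, ProphetNet appends a single
# [SEP] after each sequence (there is no [CLS]); pairs become `A [SEP] B [SEP]`.
# A minimal round trip using a tiny, made-up word-level vocabulary written to a
# temporary file (the vocabulary and file name are assumptions for illustration;
# `transformers` is assumed to be installed):
import tempfile

from transformers import ProphetNetTokenizer

toy_vocab = ["[PAD]", "[UNK]", "[SEP]", "[X_SEP]", "[MASK]", "hello", "world"]
with tempfile.NamedTemporaryFile("w", suffix=".tokenizer", delete=False) as f:
    f.write("\n".join(toy_vocab))
    vocab_path = f.name

tokenizer = ProphetNetTokenizer(vocab_file=vocab_path)
encoded = tokenizer("hello world")
print(encoded["input_ids"])                                   # [5, 6, 2] -> hello world [SEP]
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # ['hello', 'world', '[SEP]']
# -------------------------------------------------------------------------------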
hf_public_repos/transformers/src/transformers/models/mbart/modeling_mbart.py
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch MBART model.""" import copy import math from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from .configuration_mbart import MBartConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25" _CONFIG_FOR_DOC = "MBartConfig" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/mbart-large-cc25", # See all MBART models at https://huggingface.co/models?filter=mbart ] # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int): """ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not have a single `decoder_start_token_id` in contrast to other Bart-like models. 
""" prev_output_tokens = input_ids.clone() if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id) index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone() prev_output_tokens[:, 0] = decoder_start_tokens return prev_output_tokens # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart class MBartLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): """`input_ids' shape is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ).expand(bsz, -1) return super().forward(positions + self.offset) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart class MBartAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[MBartConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->MBart class MBartFlashAttention2(MBartAttention): """ MBart flash attention module. This module inherits from `MBartAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # MBartFlashAttention2 attention does not support output_attentions if output_attentions: raise ValueError("MBartFlashAttention2 attention does not support output_attentions") # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, q_len, _ = hidden_states.size() # get query proj query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: # cross_attentions key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: # self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. 
Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. 
attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) MBART_ATTENTION_CLASSES = { "eager": MBartAttention, "flash_attention_2": MBartFlashAttention2, } class MBartEncoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class MBartDecoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. 
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MBart class MBartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class MBartPreTrainedModel(PreTrainedModel): config_class = MBartConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MBartDecoderLayer", "MBartAttention"] _supports_flash_attn_2 = True def _init_weights(self, module): std = 
self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs MBART_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MBartConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MBART_GENERATION_EXAMPLE = r""" Translation example: ```python >>> from transformers import AutoTokenizer, MBartForConditionalGeneration >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro") >>> example_english_phrase = "42 is the answer" >>> inputs = tokenizer(example_english_phrase, return_tensors="pt") >>> # Translate >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] '42 este răspuns' ``` Mask filling example: ```python >>> from transformers import AutoTokenizer, MBartForConditionalGeneration >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> # de_DE is the language symbol id <LID> for German >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['nett', 'sehr', 'ganz', 'nicht', 'so'] ``` """ MBART_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class MBartEncoder(MBartPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`MBartEncoderLayer`]. Args: config: MBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = MBartLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_embedding = nn.LayerNorm(embed_dim) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _backward_compatibility_gradient_checkpointing(self): # Override to not delete the attribute from the config if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): self.gradient_checkpointing_enable() def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device) hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class MBartDecoder(MBartPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MBartDecoderLayer`] Args: config: MBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = MBartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_embedding = nn.LayerNorm(config.d_model) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
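        Example (an illustrative sketch added here for clarity rather than an official code sample; it drives the
        decoder standalone, which [`MBartModel`] normally does internally):

        ```python
        >>> from transformers import AutoTokenizer, MBartModel
        >>> from transformers.models.mbart.modeling_mbart import shift_tokens_right

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        >>> model = MBartModel.from_pretrained("facebook/mbart-large-cc25")

        >>> inputs = tokenizer("Hello world", return_tensors="pt")
        >>> encoder_outputs = model.get_encoder()(**inputs)

        >>> # MBart starts decoding from the language id token, obtained here by shifting the inputs to the right
        >>> decoder_input_ids = shift_tokens_right(inputs.input_ids, model.config.pad_token_id)
        >>> decoder_outputs = model.get_decoder()(
        ...     input_ids=decoder_input_ids,
        ...     encoder_hidden_states=encoder_outputs.last_hidden_state,
        ...     encoder_attention_mask=inputs.attention_mask,
        ... )
        >>> last_hidden_state = decoder_outputs.last_hidden_state  # (batch_size, target_sequence_length, d_model)
        ```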
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self._use_flash_attention_2: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {attn_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare MBART Model outputting raw hidden-states without any specific head on top.", MBART_START_DOCSTRING, ) class MBartModel(MBartPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: MBartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = MBartEncoder(config, self.shared) self.decoder = MBartDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.get_input_embeddings()) self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings()) @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: 
Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqModelOutput, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # different to other models, MBart automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The MBART Model with a language modeling head. 
Can be used for summarization, after fine-tuning the pretrained models.", MBART_START_DOCSTRING, ) class MBartForConditionalGeneration(MBartPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: MBartConfig): super().__init__(config) self.model = MBartModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(MBART_GENERATION_EXAMPLE) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MBART_START_DOCSTRING, ) class MBartForSequenceClassification(MBartPreTrainedModel): _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"] def __init__(self, config: MBartConfig, **kwargs): super().__init__(config, **kwargs) self.model = MBartModel(config) self.classification_head = MBartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MBART_START_DOCSTRING, ) class MBartForQuestionAnswering(MBartPreTrainedModel): _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = MBartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward def forward( self, input_ids: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->MBart class MBartDecoderWrapper(MBartPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = MBartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25 class MBartForCausalLM(MBartPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = MBartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import AutoTokenizer, MBartForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> model = MBartForCausalLM.from_pretrained("facebook/mbart-large-cc25", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
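

# ---------------------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how beam search interacts with `_reorder_cache`.
# `beam_idx` tells each layer which beam every cache row should follow: the self-attention key/value states are
# gathered along the batch dimension, while the cached cross-attention states (positions 2 and 3 of each layer
# tuple) are identical at every step and are passed through unchanged. All shapes below are arbitrary assumptions.
if __name__ == "__main__":
    import torch

    dummy_layer_past = tuple(torch.randn(2, 4, 5, 8) for _ in range(4))  # (self k, self v, cross k, cross v)
    beam_idx = torch.tensor([1, 1])  # both surviving beams continue from former beam 1

    reordered = MBartForConditionalGeneration._reorder_cache((dummy_layer_past,), beam_idx)

    # the self-attention caches follow the selected beams ...
    assert torch.equal(reordered[0][0][0], dummy_layer_past[0][1])
    # ... while the cached cross-attention states are kept as-is
    assert reordered[0][2] is dummy_layer_past[2]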
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/modeling_tf_mbart.py
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 MBart model."""


from __future__ import annotations

import random
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPastAndCrossAttentions,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)

# Public API
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_mbart import MBartConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
_CONFIG_FOR_DOC = "MBartConfig"


LARGE_NEGATIVE = -1e8


def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int):
    """
    Shift input ids one token to the right, and wrap the last non-pad token (the <LID> token). Note that MBart does
    not have a single `decoder_start_token_id` in contrast to other Bart-like models.
    """
    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    input_ids = tf.where(
        input_ids == -100, tf.fill(shape_list(input_ids), tf.cast(pad_token_id, input_ids.dtype)), input_ids
    )
    language_id_index = (
        tf.reduce_sum(tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=input_ids.dtype), axis=-1) - 1
    )
    language_id_index = tf.stack(
        [tf.range(shape_list(input_ids)[0], dtype=input_ids.dtype), language_id_index], axis=-1
    )
    languages_ids = tf.gather_nd(input_ids, language_id_index)

    shifted_input_ids = tf.concat([tf.expand_dims(languages_ids, axis=-1), input_ids[:, :-1]], axis=-1)

    return shifted_input_ids


# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for uni-directional (causal) self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])

    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))


# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
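    For illustration only, a minimal sketch of the expected behaviour (not part of the original docstring): positions
    with mask value `1` map to `0.0` (attend) and positions with mask value `0` map to `LARGE_NEGATIVE` (ignore), so
    the result can be added directly to the attention scores.

    ```python
    >>> import tensorflow as tf

    >>> additive_mask = _expand_mask(tf.constant([[1, 1, 0]]))  # last position is padding
    >>> tuple(additive_mask.shape)
    (1, 1, 3, 3)
    >>> float(additive_mask[0, 0, 0, -1])  # masked position receives a large negative bias
    -100000000.0
    ```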
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.bart.modeling_tf_bart.TFBartLearnedPositionalEmbedding with Bart->MBart class TFMBartLearnedPositionalEmbedding(tf.keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call( self, input_shape: Optional[tf.TensorShape] = None, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None, ): """Input is expected to be of size [bsz x seqlen].""" if position_ids is None: seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32 return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->MBart class TFMBartAttention(tf.keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFMBartEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFMBartAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = 
tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: Optional[bool] = False, ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(encoder_attention_heads,)* """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFMBartDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFMBartAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFMBartAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = 
tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Tuple[tf.Tensor] | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(decoder_attention_heads,)* cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. *(decoder_attention_heads,)* past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) def build(self, input_shape=None): if self.built: 
return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFMBartPreTrainedModel(TFPreTrainedModel): config_class = MBartConfig base_model_prefix = "model" MBART_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`MBartConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" MBART_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tf.FloatTensor`, *optional*): hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape `(batch_size, sequence_length, hidden_size)` is a sequence of past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ MBART_GENERATION_EXAMPLE = r""" Translation example: ```python >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro") >>> example_english_phrase = "42 is the answer" >>> inputs = tokenizer(example_english_phrase, return_tensors="tf") >>> # Translate >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] '42 este răspuns' ``` Mask filling example: ```python >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration >>> import tensorflow as tf >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> # de_DE is the language symbol id <LID> for German >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="tf")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = tf.where(input_ids[0] == tokenizer.mask_token_id)[0, 0] >>> probs = tf.nn.softmax(logits[0, masked_index], axis=0) >>> values, predictions = tf.math.top_k(probs, 5) >>> tokenizer.decode(predictions).split() ['nett', 'sehr', 'ganz', 'nicht', 'so'] ``` """ @keras_serializable class TFMBartEncoder(tf.keras.layers.Layer): config_class = MBartConfig """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TFMBartEncoderLayer`]. 
Args: config: MBartConfig """ def __init__(self, config: MBartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = tf.keras.layers.Dropout(config.dropout) self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens self.embed_positions = TFMBartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.layers = [TFMBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.embed_dim = config.d_model def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, inputs_embeds: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: """ Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. 
training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # check attention mask and invert if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask) else: attention_mask = None encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), message=( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) # encoder layers for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, ) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.embed_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFMBartDecoder(tf.keras.layers.Layer): config_class = MBartConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFMBartDecoderLayer`] Args: config: MBartConfig embed_tokens: output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFMBartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFMBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids: TFModelInputType = None, inputs_embeds: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[ TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor] ]: r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 # embed positions if position_ids is None: positions = self.embed_positions(input_shape, past_key_values_length) else: positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask( tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] ) if attention_mask is not None: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns else: return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.config.d_model]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFMBartMainLayer(tf.keras.layers.Layer): config_class = MBartConfig def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = tf.keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="model.shared", ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "model.shared" self.encoder = TFMBartEncoder(config, self.shared, name="encoder") self.decoder = TFMBartDecoder(config, self.shared, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call( self, input_ids: TFModelInputType = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, decoder_position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, inputs_embeds: tf.Tensor | None = None, 
decoder_inputs_embeds: tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFSeq2SeqModelOutput, tf.Tensor]: if decoder_input_ids is None and decoder_inputs_embeds is None: use_cache = False output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if decoder_input_ids is None and input_ids is not None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not return_dict and not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True # The shared/tied weights expect to be in the model base namespace # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than # the current one. 
with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): self.shared.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare MBART Model outputting raw hidden-states without any specific head on top.", MBART_START_DOCSTRING, ) class TFMBartModel(TFMBartPreTrainedModel): def __init__(self, config: MBartConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFMBartMainLayer(config, name="model") def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @unpack_inputs @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, decoder_position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, inputs_embeds: tf.Tensor | None = None, decoder_inputs_embeds: tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", 
None) is not None: with tf.name_scope(self.model.name): self.model.build(None) # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer class BiasLayer(tf.keras.layers.Layer): """ Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, so all weights have to be registered in a layer. """ def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings( "The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.", MBART_START_DOCSTRING, ) class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = [ r"model.encoder.embed_tokens.weight", r"model.decoder.embed_tokens.weight", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFMBartMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {"final_logits_bias": self.bias_layer.bias} def set_bias(self, value): # Replaces the existing layers containing bias for correct (de)serialization. vocab_size = value["final_logits_bias"].shape[-1] self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False ) self.bias_layer.bias.assign(value["final_logits_bias"]) @unpack_inputs @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(MBART_GENERATION_EXAMPLE) def call( self, input_ids: TFModelInputType = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, decoder_position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, encoder_outputs: Optional[TFBaseModelOutput] = None, past_key_values: Tuple[Tuple[tf.Tensor]] = None, inputs_embeds: tf.Tensor | None = None, decoder_inputs_embeds: tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: """ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: """ if labels is not None: labels = tf.where( labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels, ) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs cross_attentions=outputs.cross_attentions, # index 4 of d outputs encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out encoder_attentions=outputs.encoder_attentions, # 2 of e out ) # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if 
decoder_attention_mask is not None: # xla decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: # no xla + past_key_values decoder_position_ids = past_key_values[0][0].shape[2] else: # no xla + no past_key_values decoder_position_ids = tf.range(decoder_input_ids.shape[1]) return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "decoder_position_ids": decoder_position_ids, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None)
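# --- Usage sketch (added for illustration; not part of the original modeling_tf_mbart.py) ---
# A minimal, hedged example of translating with TFMBartForConditionalGeneration. It assumes the
# public "facebook/mbart-large-en-ro" checkpoint and a working TensorFlow install; the decoded
# output is illustrative rather than guaranteed.
from transformers import MBartTokenizer, TFMBartForConditionalGeneration

tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")

inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="tf")
# mBART decoding starts from the target language code, hence the explicit decoder_start_token_id.
generated_ids = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])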
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/tokenization_mbart_fast.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: MBartTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip class MBartTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import MBartTokenizerFast >>> tokenizer = MBartTokenizerFast.from_pretrained( ... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO" ... 
) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt") ```""" vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = MBartTokenizer prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(src_lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. 
No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
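# --- Usage sketch (added for illustration; not part of the original tokenization_mbart_fast.py) ---
# Demonstrates the `<tokens> <eos> <lang_code>` layout built by the post-processors above, assuming
# the "facebook/mbart-large-en-ro" checkpoint is reachable; the printed tokens are the expected values.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tokenizer(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
    return_tensors="pt",
)
# Source ids end with </s> + source language code, labels end with </s> + target language code.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0].tolist())[-2:])  # ['</s>', 'en_XX']
print(tokenizer.convert_ids_to_tokens(batch["labels"][0].tolist())[-2:])     # ['</s>', 'ro_RO']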
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/configuration_mbart.py
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MBART model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/config.json", # See all MBART models at https://huggingface.co/models?filter=mbart } class MBartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import MBartConfig, MBartModel >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration >>> configuration = MBartConfig() >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration >>> model = MBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs, ) # Copied from 
transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": # TODO: figure this case out. common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length + 3 decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( 
[common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 num_encoder_layers, _ = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) ] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t )
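# --- Usage sketch (added for illustration; not part of the original configuration_mbart.py) ---
# Shows how the attribute_map declared above exposes `hidden_size` and `num_attention_heads` as
# aliases of `d_model` and `encoder_attention_heads`; the chosen sizes are arbitrary examples.
from transformers import MBartConfig, MBartModel

config = MBartConfig(
    d_model=512,
    encoder_layers=6,
    decoder_layers=6,
    encoder_attention_heads=8,
    decoder_attention_heads=8,
)
assert config.hidden_size == 512          # alias for d_model
assert config.num_attention_heads == 8    # alias for encoder_attention_heads
model = MBartModel(config)                # randomly initialized model with the reduced architecture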
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/tokenization_mbart.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip class MBartTokenizer(PreTrainedTokenizer): """ Construct an MBART tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import MBartTokenizer >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO") >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt") ```""" vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs, ): # Mask token behave like a normal word, i.e. 
include the space before it mask_token = ( AddedToken(mask_token, lstrip=True, normalized=False) if isinstance(mask_token, str) else mask_token ) self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _additional_special_tokens = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=None, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.lang_code_to_id[src_lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. 
No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
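# --- Usage sketch (added for illustration; not part of the original tokenization_mbart.py) ---
# Shows how the src_lang setter re-runs set_src_lang_special_tokens and updates the suffix tokens
# appended by build_inputs_with_special_tokens; assumes "facebook/mbart-large-cc25" can be downloaded.
from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25", src_lang="en_XX")
print(tok.convert_ids_to_tokens(tok.suffix_tokens))  # ['</s>', 'en_XX']
tok.src_lang = "de_DE"  # setter calls set_src_lang_special_tokens("de_DE")
print(tok.convert_ids_to_tokens(tok.suffix_tokens))  # ['</s>', 'de_DE']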
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_mbart"] = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mbart"] = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_mbart"] = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_mbart"] = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, 
FlaxMBartPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
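# --- Usage sketch (added for illustration; not part of the original __init__.py) ---
# The _LazyModule registered above defers the heavy framework imports until an attribute is first
# accessed, so importing the package stays cheap even when optional backends are missing.
from transformers.models import mbart

config_cls = mbart.MBartConfig   # first access triggers the real import of configuration_mbart
print(config_cls.model_type)     # "mbart"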
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False ): state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0] mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size) if mbart_50 and finetuned: mbart_config.activation_function = "relu" state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] model = MBartForConditionalGeneration(mbart_config) model.model.load_state_dict(state_dict) if finetuned: model.lm_head = make_linear_from_emb(model.model.shared) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") args = parser.parse_args() model = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50 ) model.save_pretrained(args.pytorch_dump_folder_path)
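# --- Usage sketch (added for illustration; not part of the original conversion script) ---
# Programmatic equivalent of the CLI defined above, using a hypothetical local checkpoint path;
# roughly the same as:
#   python convert_mbart_original_checkpoint_to_pytorch.py ./model.pt ./mbart-converted --hf_config facebook/mbart-large-cc25
model = convert_fairseq_mbart_checkpoint_from_disk("./model.pt", hf_config_path="facebook/mbart-large-cc25")
model.save_pretrained("./mbart-converted")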
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mbart/modeling_flax_mbart.py
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax MBart model.""" import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, FlaxSeq2SeqQuestionAnsweringModelOutput, FlaxSeq2SeqSequenceClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_mbart import MBartConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25" _CONFIG_FOR_DOC = "MBartConfig" MBART_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`MBartConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. 
**Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ MBART_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ MBART_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ MBART_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not have a single `decoder_start_token_id` in contrast to other Bart-like models. """ prev_output_tokens = jnp.array(input_ids).copy() if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` prev_output_tokens = jnp.where(prev_output_tokens == -100, pad_token_id, input_ids) index_of_eos = (jnp.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1) decoder_start_tokens = jnp.array( [prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=jnp.int32 ).squeeze() prev_output_tokens = prev_output_tokens.at[:, 1:].set(prev_output_tokens[:, :-1]) prev_output_tokens = prev_output_tokens.at[:, 0].set(decoder_start_tokens) return prev_output_tokens # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->MBart class FlaxMBartAttention(nn.Module): config: MBartConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. 
is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: # self_attention key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxMBartEncoderLayer(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxMBartAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense( self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->MBart class FlaxMBartEncoderLayerCollection(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxMBartEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] self.layerdrop = self.config.encoder_layerdrop def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop 
(see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxMBartDecoderLayer(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxMBartAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.encoder_attn = FlaxMBartAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense( self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, 
cross_attn_weights) return outputs # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->MBart class FlaxMBartDecoderLayerCollection(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxMBartDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] self.layerdrop = self.config.decoder_layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartClassificationHead with Bart->MBart class FlaxMBartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" config: MBartConfig inner_dim: int num_classes: int pooler_dropout: float dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.dropout = nn.Dropout(rate=self.pooler_dropout) self.out_proj = nn.Dense( self.num_classes, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) def __call__(self, hidden_states: jnp.ndarray, deterministic: bool): hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = jnp.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states class FlaxMBartEncoder(nn.Module): config: MBartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale 
= math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxMBartEncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_ids, attention_mask, position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, ) class FlaxMBartDecoder(nn.Module): config: MBartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxMBartDecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # embed positions positions = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->MBart class FlaxMBartModule(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype, ) self.encoder = FlaxMBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = FlaxMBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, 
deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxMBartPreTrainedModel(FlaxPreTrainedModel): config_class = MBartConfig base_model_prefix: str = "model" module_class: nn.Module = None def __init__( self, config: MBartConfig, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") # make sure initialization pass will work for FlaxMBartForSequenceClassificationModule input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel.init_cache with Bart->MBart def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(MBART_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MBartConfig) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MBartConfig) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, 
return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxMBartAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # prepare decoder inputs if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings( "The bare MBart 
Model transformer outputting raw hidden-states without any specific head on top.", MBART_START_DOCSTRING, ) class FlaxMBartModel(FlaxMBartPreTrainedModel): config: MBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation module_class = FlaxMBartModule append_call_sample_docstring(FlaxMBartModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->MBart class FlaxMBartForConditionalGenerationModule(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = FlaxMBartModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["shared"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( "The MMBart Model with a language modeling head. 
Can be used for summarization.", MBART_START_DOCSTRING ) class FlaxMBartForConditionalGeneration(FlaxMBartPreTrainedModel): module_class = FlaxMBartForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MBartConfig) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxMBartAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables["params"]["shared"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
# Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING = r""" Returns: Summarization example: ```python >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration, MBartConfig >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5).sequences >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` Mask filling example: ```python >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> # de_DE is the language symbol id <LID> for German >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. 
</s> de_DE" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="np")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ``` """ overwrite_call_docstring( FlaxMBartForConditionalGeneration, MBART_INPUTS_DOCSTRING + FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxMBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForSequenceClassificationModule with Bart->MBart class FlaxMBartForSequenceClassificationModule(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 num_labels: Optional[int] = None def setup(self): self.model = FlaxMBartModule(config=self.config, dtype=self.dtype) self.classification_head = FlaxMBartClassificationHead( config=self.config, inner_dim=self.config.d_model, num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels, pooler_dropout=self.config.classifier_dropout, ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] # last hidden state eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0) # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer: if len(jnp.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") if any(eos_mask.sum(1) == 0): raise ValueError("There are missing <eos> tokens in input_ids") # Ensure to keep 1 only for the last <eos> token for each example eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6 eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0) sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1) logits = self.classification_head(sentence_representation, deterministic=deterministic) if not return_dict: output = (logits,) + outputs[1:] return output return FlaxSeq2SeqSequenceClassifierOutput( logits=logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MBART_START_DOCSTRING, ) class FlaxMBartForSequenceClassification(FlaxMBartPreTrainedModel): module_class = FlaxMBartForSequenceClassificationModule dtype = jnp.float32 append_call_sample_docstring( FlaxMBartForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqSequenceClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForQuestionAnsweringModule with Bart->MBart class FlaxMBartForQuestionAnsweringModule(nn.Module): config: MBartConfig dtype: jnp.dtype = jnp.float32 num_labels = 2 def setup(self): self.model = FlaxMBartModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense( self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return output return FlaxSeq2SeqQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, MBART_START_DOCSTRING, ) class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel): module_class = FlaxMBartForQuestionAnsweringModule dtype = jnp.float32 append_call_sample_docstring( FlaxMBartForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, )
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert GPTSANJapanese checkpoints from the original repository to pytorch model.""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def convert_tf_gptsan_to_pt(args): parameter_file = os.path.join(args.tf_model_dir, "parameters.json") params = json.loads(open(parameter_file).read()) if not params: raise ValueError( f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." ) if not args.output.endswith(".pt"): args.output = args.output + ".pt" new_state = OrderedDict() with tf.device("/CPU:0"): reader = tf.train.load_checkpoint(args.tf_model_dir) shapes = reader.get_variable_to_shape_map() for key_name in shapes.keys(): vnp = reader.get_tensor(key_name).astype(np.float16) if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"): continue if key_name.startswith("pasts/"): if key_name.startswith("pasts/mlp"): player = int(key_name[9]) elif key_name.startswith("pasts/out"): player = 8 name = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.startswith("model/moe"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/switch_gating/kernel"): name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/softmlp/kernel"): name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"): nlayer = key_name[-9:-7] for i in range(16): name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) state = ( vnp[i].transpose([1, 0]).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided new_state[name] = torch.tensor(state) elif key_name.startswith("model/mlp"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/p1/kernel"): name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/p1/bias"): name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/p2/kernel"): name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.endswith("/p2/bias"): name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player state = vnp.copy() # same 
because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.startswith("model/ln"): player = int(key_name[8:].split("/")[0]) if key_name.endswith("/b"): name = "model.blocks.%d.feed_forward.norm.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/g"): name = "model.blocks.%d.feed_forward.norm.weight" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.startswith("model/att"): player = int(key_name[9:].split("/")[0]) if key_name.endswith("/qkv/kernel"): state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum state_q = state[:, 0, :, :] state_k = state[:, 1, :, :] state_v = state[:, 2, :, :] state_q = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix state_k = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix state_v = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]]) .transpose([1, 0]) .copy() ) # Mesh-Tensorflow is a diagonal matrix name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player new_state[name] = torch.tensor(state_q) name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player new_state[name] = torch.tensor(state_k) name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player new_state[name] = torch.tensor(state_v) elif key_name.endswith("/o/kernel"): name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player state = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy() ) # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name.startswith("model/an"): player = int(key_name[8:].split("/")[0]) if key_name.endswith("/b"): name = "model.blocks.%d.self_attn.norm.bias" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif key_name.endswith("/g"): name = "model.blocks.%d.self_attn.norm.weight" % player state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) elif ( key_name.startswith("model/wte") or key_name.startswith("model/wpe") or key_name.startswith("model/ete") ): nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] name = "model.%s.weight" % nlayer state = vnp.copy() # same in embedded new_state[name] = torch.tensor(state) if key_name.startswith("model/wte"): name = "lm_head.weight" state = vnp.copy() # same in embedded new_state[name] = torch.tensor(state) elif key_name.startswith("model/wob"): name = "final_logits_bias" state = vnp.copy() # same in embedded state = state.reshape((1, -1)) new_state[name] = torch.tensor(state) elif key_name == "model/dense/kernel": name = "model.last_project.weight" state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix new_state[name] = torch.tensor(state) elif key_name == "model/dense_1/bias": name = "model.last_project.bias" state = vnp.copy() # same because it is one dimensional new_state[name] = torch.tensor(state) torch.save(new_state, args.output) if __name__ == "__main__": parser = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, 
help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") args = parser.parse_args() convert_tf_gptsan_to_pt(args)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for GPTSANJapanese.""" import collections import json import os import re from typing import List, Optional, Tuple, Union import numpy as np from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import ( BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/vocab.txt", }, "emoji_file": { "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/emoji.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "Tanrei/GPTSAN-japanese": 1280, } def load_vocab_and_emoji(vocab_file, emoji_file): """Loads a vocabulary file and emoji file into a dictionary.""" with open(emoji_file, "r", encoding="utf-8") as f: emoji = json.loads(f.read()) vocab = collections.OrderedDict() raw_vocab = collections.OrderedDict() ids_to_tokens = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as f: token = f.readlines() token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token] for idx, b in enumerate(token): ids_to_tokens[idx] = b raw_vocab[",".join(b)] = idx for wd in b: vocab[wd] = idx return vocab, raw_vocab, ids_to_tokens, emoji class GPTSanJapaneseTokenizer(PreTrainedTokenizer): """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling - Return token_type_ids for Prefix-LM model The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when decoding In addition, the original Japanese special Sub-Word-Encoding has been released in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a sentence of the prefix part and the part after it as a text pair of batch input. 
Example: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> # You can confirm both 慶応 and 慶應 are encoded to 17750 >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"] [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] >>> # Both 慶応 and 慶應 are decoded to 慶応 >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]) '吾輩は猫である🐯。実は慶応(慶応)大学出身' ``` Example for Prefix-LM: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"] [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] >>> # Mask for Prefix-LM inputs >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"] [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ``` Example for batch encode: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"] [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] >>> # Mask for Prefix-LM inputs >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"] [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] >>> # Mask for padding >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"] [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] ``` Args: vocab_file (`str`): File containing the vocabulary. emoji_file (`str`): File containing the emoji. unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`): The token used for unknown charactor pad_token (`str`, *optional*, defaults to `"<|separator|>"`): The token used for padding bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`): A special token to separate token to prefix part and general input part. do_clean_text (`bool`, *optional*, defaults to `False`): Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask", "token_type_ids"] def __init__( self, vocab_file, emoji_file, unk_token="<|nottoken|>", pad_token="<|separator|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", sep_token="<|segmenter|>", do_clean_text=False, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(emoji_file): raise ValueError( f"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.do_clean_text = do_clean_text self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file) self.subword_tokenizer = SubWordJapaneseTokenizer( vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji ) super().__init__( unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs, ) @property # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size def vocab_size(self): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab def get_vocab(self): return dict(self.raw_vocab, **self.added_tokens_encoder) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize def _tokenize(self, text): return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.subword_tokenizer.convert_id_to_token(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" words = [] byte_tokens = [] for word in tokens: if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) byte_tokens = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word]) elif word == "<SP>": words.append(" ") elif word == "<BR>": words.append("\n") elif word == "<TAB>": words.append("\t") elif word == "<BLOCK>": words.append("▀") elif word == "<KIGOU>": words.append("ǀ") elif word == "<U2000U2BFF>": words.append("‖") elif word == "<|bagoftoken|>": if len(words) > 0: words.append(words[-1]) words.append(words[-1]) words.append(words[-1]) elif word.startswith("<|") and word.endswith("|>"): words.append("") else: words.append(word) if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) text = "".join(words) return text @property def default_chat_template(self): """ A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role information. """ logger.warning_once( "\nNo chat template is defined for this tokenizer - using the default template " f"for the {self.__class__.__name__} class. If the default is not appropriate for " "your model, please set `tokenizer.chat_template` to an appropriate template. 
" "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" ) return ( "{% for message in messages %}" "{% if not loop.first %}{{ bos_token}}{% endif %}" "{{ sep_token }}{{ message.content }} {{ eos_token }}" "{% endfor %}" ) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: vocab_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(vocab_file, "w", encoding="utf-8") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(",".join(token) + "\n") index += 1 with open(emoji_file, "w", encoding="utf-8") as writer: json.dump(self.emoji, writer) return vocab_file, emoji_file def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: # docstyle-ignore """ The tokenizer returns token_type_ids as separators between the Prefix part and the rest. token_type_ids is 1 for the Prefix part and 0 for the rest of the token. Example: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("アイウエ") >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ | >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 | >>> x_token = tokenizer("", prefix_text="アイウエ") >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG | >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 | >>> x_token = tokenizer("ウエ", prefix_text="アイ") >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ | >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 | ```""" prefix_len = 0 if self.sep_token in self.vocab: segid = self.vocab[self.sep_token] if segid in token_ids_0: prefix_len = token_ids_0.index(segid) if token_ids_1 is None: total_len = len(token_ids_0) else: total_len = len(token_ids_0 + token_ids_1) return prefix_len * [1] + (total_len - prefix_len) * [0] def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs): # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation. # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest. 
if add_sep_token is None: add_sep_token = self.sep_token not in text # If insert un-prefix position explicitly prepared = self.bos_token if self.bos_token in self.vocab else "" prepared += prefix_text if prefix_text is not None else "" if add_sep_token: prepared += self.sep_token if self.sep_token in self.vocab else "" prepared += text return (prepared, kwargs) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: # This tokenizer converts input text pairs into Prefix input and subsequent input if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list): # As a single text with an explicit un-prefix position batch_prefix_texts = [] for pref, txt in batch_text_or_text_pairs: batch_prefix_texts.append(pref + self.sep_token + txt) batch_text_or_text_pairs = batch_prefix_texts return super()._batch_encode_plus( batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, ) class SubWordJapaneseTokenizer(object): """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT Lisence according to the original repository. MIT License Copyright (c) 2020 tanreinama Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__ def __init__(self, vocab, ids_to_tokens, emoji): self.vocab = vocab # same as swe self.ids_to_tokens = ids_to_tokens # same as bpe self.emoji = emoji self.maxlen = np.max([len(w) for w in self.vocab.keys()]) self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") self.content_repatter4 = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) self.content_repatter5 = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) self.content_repatter6 = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks}) # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__ def __len__(self): return len(self.ids_to_tokens) # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text def clean_text(self, content): content = self.content_repatter1.sub("<URL>", content) content = self.content_repatter2.sub("<EMAIL>", content) content = self.content_repatter3.sub("<TEL>", content) content = self.content_repatter4.sub("<DATE>", content) content = self.content_repatter5.sub("<DATE>", content) content = self.content_repatter6.sub("<PRICE>", content) content = content.translate(self.content_trans1) while "<BLOCK><BLOCK>" in content: content = content.replace("<BLOCK><BLOCK>", "<BLOCK>") return content # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize def tokenize(self, text, clean=False): text = text.replace(" ", "<SP>") text = text.replace(" ", "<SP>") text = text.replace("\r\n", "<BR>") text = text.replace("\n", "<BR>") text = text.replace("\r", "<BR>") text = text.replace("\t", "<TAB>") text = text.replace("—", "ー") text = text.replace("−", "ー") for k, v in self.emoji["emoji"].items(): if k in text: text = text.replace(k, v) if clean: text = self.clean_text(text) def check_simbol(x): e = x.encode() if len(x) == 1 and len(e) == 2: c = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0xC2A1 and c <= 0xC2BF) or (c >= 0xC780 and c <= 0xC783) or (c >= 0xCAB9 and c <= 0xCBBF) or (c >= 0xCC80 and c <= 0xCDA2) ): return True return False def checku2e(x): e = x.encode() if len(x) == 1 and len(e) == 3: c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 0xE28080 and c <= 0xE2B07F: return True return False pos = 0 result = [] while pos < len(text): end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 candidates = [] # (token_id, token, pos) for e in range(end, pos, -1): wd = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(wd) > 2: candidates = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if 
len(candidates) > 0: # the smallest token_id is adopted _, wd, e = sorted(candidates, key=lambda x: x[0])[0] result.append(wd) pos = e else: end = pos + 1 wd = text[pos:end] if check_simbol(wd): result.append("<KIGOU>") elif checku2e(wd): result.append("<U2000U2BFF>") else: for i in wd.encode("utf-8"): result.append("<|byte%d|>" % i) pos = end return result def convert_id_to_token(self, index): return self.ids_to_tokens[index][0]
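# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): `SubWordJapaneseTokenizer.tokenize`
# above is, at its core, a greedy longest-match lookup with a byte-level fallback
# for characters that are not in the vocabulary. The toy function below shows only
# that matching strategy on a hypothetical vocabulary; the real implementation
# additionally prefers the smallest token id among candidates and emits special
# tokens such as <KIGOU> and <U2000U2BFF>.
# ---------------------------------------------------------------------------
def _toy_longest_match_tokenize(text, vocab):
    """Greedy longest-match tokenization with a <|byteN|> fallback."""
    max_len = max(len(word) for word in vocab)
    pos, result = 0, []
    while pos < len(text):
        # try the longest window first, shrinking one character at a time
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                result.append(piece)  # longest match wins
                pos = end
                break
        else:
            # unknown character: fall back to one <|byteN|> token per UTF-8 byte
            for byte in text[pos].encode("utf-8"):
                result.append("<|byte%d|>" % byte)
            pos += 1
    return result


# _toy_longest_match_tokenize("武田信玄", {"武", "武田", "信玄"}) -> ["武田", "信玄"]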
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
# coding=utf-8 # Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GPTSANJapanese model.""" import copy from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from ...activations import ACT2FN from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, ) from .configuration_gptsan_japanese import GPTSanJapaneseConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GPTSanJapaneseConfig" _CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "Tanrei/GPTSAN-japanese", # See all GPTSAN-japanese models at https://huggingface.co/models?filter=gptsan-japanese ] # Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func def router_z_loss_func(router_logits: torch.Tensor) -> float: r""" Compute the router z-loss implemented in PyTorch. The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906). It encourages router logits to remain small in an effort to improve stability. Args: router_logits (`float`): Input logits of shape [batch_size, sequence_length, num_experts] Returns: Scalar router z-loss. """ num_groups, tokens_per_group, _ = router_logits.shape log_z = torch.logsumexp(router_logits, dim=-1) z_loss = log_z**2 return torch.sum(z_loss) / (num_groups * tokens_per_group) # Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: router_probs (`torch.Tensor`): Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts]. expert_indices (`torch.Tensor`): Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token. Returns: The auxiliary loss. 
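    Example (illustrative only; with a perfectly even token split and uniform router probabilities the loss
    evaluates to 1.0):

    ```python
    >>> import torch
    >>> from transformers.models.gptsan_japanese.modeling_gptsan_japanese import load_balancing_loss_func

    >>> router_probs = torch.full((1, 4, 2), 0.5)  # 1 group, 4 tokens, 2 experts
    >>> expert_indices = torch.tensor([[0, 1, 0, 1]])  # tokens routed evenly to both experts
    >>> load_balancing_loss_func(router_probs, expert_indices)
    tensor(1.)
    ```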
""" num_experts = router_probs.shape[-1] # cast the expert indices to int64, otherwise one-hot encoding will fail if expert_indices.dtype != torch.int64: expert_indices = expert_indices.to(torch.int64) if len(expert_indices.shape) == 2: expert_indices = expert_indices.unsqueeze(2) expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts) # For a given token, determine if it was routed to a given expert. expert_mask = torch.max(expert_mask, axis=-2).values # cast to float32 otherwise mean will fail expert_mask = expert_mask.to(torch.float32) tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) class GPTSanJapaneseDenseActDense(nn.Module): """ FFN Layer for Switch Transformer and Extra layers GPTSAN can mix Switch Transformer layers and normal Transformer layers This class is used as Expert in Switch Transformer layers and as FFN in regular Transformer layers. RELU is used in the Switch Transformer layer, and Swish is used in the normal Transformer layer, so there is a choice of which is used in the argument. """ def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False): super().__init__() d_inter = config.d_ext if ext_layer else config.d_ff self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer) self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer) self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate) self.act = ACT2FN["swish" if ext_layer else "relu"] def forward(self, hidden_states): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese class GPTSanJapaneseTop1Router(nn.Module): """ Router using tokens choose top-1 experts assignment. This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. """ def __init__(self, config: GPTSanJapaneseConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.jitter_noise = config.router_jitter_noise self.ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r""" Computes router probabilities from input hidden states. Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: router_probabilities (`torch.Tensor`): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. 
Used for routing tokens to experts. router_logits (`torch.Tensor`): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. """ # float32 is used to ensure stability. See the discussion of "selective precision" in # https://arxiv.org/abs/2101.03961. # We also store the previous dtype to cast back the output to the previous dtype self.input_dtype = hidden_states.dtype hidden_states = hidden_states.to(self.dtype) if self.training and self.jitter_noise > 0: # Multiply the token inputs by the uniform distribution - adding some noise hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() router_logits = self.classifier(hidden_states) # Apply Softmax and cast back to the original `dtype` router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) return router_probabilities, router_logits def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. """ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def forward(self, hidden_states: torch.Tensor) -> Tuple: r""" Generic forward function for every Router class. Each Router expects to have the same input hidden states (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the number of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert. Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned to an expert. Then each Router class will have to define its own `_compute_routing_instructions`. Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs and the router logits. The router probabilities and logits are required to compute the loss. """ router_probs, router_logits = self._compute_router_probabilities(hidden_states) expert_index = torch.argmax(router_probs, dim=-1) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) # Mask tokens outside expert capacity. Sum over each sequence token_priority = torch.cumsum(expert_index, dim=-2) # mask if the token routed to to the expert will overflow expert_capacity_mask = token_priority <= self.expert_capacity expert_index = expert_index * expert_capacity_mask router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) return expert_index, router_probs, router_logits # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese class GPTSanJapaneseSparseMLP(nn.Module): r""" Implementation of the Switch Transformers Sparse MLP module. 
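    Example (a minimal sketch using the default configuration; the module is randomly initialized and the
    routing is therefore arbitrary, so only the output shape is checked):

    ```python
    >>> import torch
    >>> from transformers import GPTSanJapaneseConfig
    >>> from transformers.models.gptsan_japanese.modeling_gptsan_japanese import GPTSanJapaneseSparseMLP

    >>> config = GPTSanJapaneseConfig()
    >>> sparse_mlp = GPTSanJapaneseSparseMLP(config)
    >>> hidden_states = torch.randn(1, 4, config.d_model)
    >>> output, (router_logits, expert_index) = sparse_mlp(hidden_states)
    >>> output.shape == hidden_states.shape
    True
    ```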
""" def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense): super().__init__() # Step 1: Get the correct router according to its class self.router = GPTSanJapaneseTop1Router(config) # Step 2: Get the experts self.experts = nn.ModuleDict() for idx in range(config.num_experts): self.experts[f"expert_{idx}"] = expert_class(config) def forward(self, hidden_states): r""" Hold on, this will be slightly tricky to understand In the correct order, a MoE layer does the following: 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)` and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor). 2- Dispatch the tokens to its associated experts. We do a classic for loop over the experts and assign for each expert the corresponding hidden states. """ # Step 1: Get the router_mask from the router as wel as the probabilities router_mask, router_probs, router_logits = self.router(hidden_states) expert_index = torch.argmax(router_mask, dim=-1) # The routers introduced might not always map all the tokens, to a router, which means that some hidden states # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the seleced ones. next_states = hidden_states.clone() for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, :, idx].bool() next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) class GPTSanJapaneseLayerSparseFF(nn.Module): r""" Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module. Parameters: config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ def __init__(self, config: GPTSanJapaneseConfig): super().__init__() self.mlp = GPTSanJapaneseSparseMLP(config) self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward(self, hidden_states, output_router_logits): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. output_router_logits (`bool`) : output experts router output. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ forwarded_states, router_tuple = self.mlp(hidden_states) forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states)) output = hidden_states + self.norm(forwarded_states) if output_router_logits and router_tuple is not None: return output, router_tuple else: return output class GPTSanJapaneseLayerDenseFF(nn.Module): r""" Extra Transformers Feed Forward layer module. Parameters: config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" def __init__(self, config: GPTSanJapaneseConfig): super().__init__() # Check if it is a sparse layer, if not then it is a dense layer self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward(self, hidden_states): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ forwarded_states = self.mlp(hidden_states) output = hidden_states + self.norm(forwarded_states) return output # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese class GPTSanJapaneseAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[GPTSanJapaneseConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) 
if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class GPTSanJapaneseLayerSelfAttention(nn.Module): """ Self Attention and Normalization Unit """ def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.self_attn = GPTSanJapaneseAttention( embed_dim=config.d_model, num_heads=config.num_heads, is_decoder=True, bias=has_relative_attention_bias, ) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: r""" Self-attention and normalize block. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
""" # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, layer_head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_weights = (atten_out[1],) else: attn_weights = () attention_output = atten_out[0] hidden = hidden_states + self.norm(attention_output) if use_cache: outputs = (hidden, atten_out[2]) # hidden, present, (attentions) else: outputs = (hidden,) # hidden, (attentions) return outputs + attn_weights class GPTSanJapaneseBlock(nn.Module): """ Self Attention and FFN Unit """ def __init__(self, config, ext_layer=False): super().__init__() self.self_attn = GPTSanJapaneseLayerSelfAttention(config) self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, output_router_tuple: Optional[bool] = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: r""" GPTSAN transformer block. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`) : output attention probabirities. output_router_tuple: output experts router logits and expert id. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
""" atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attention_output = atten_out[0] if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF): sparse_out = self.feed_forward(attention_output, output_router_tuple) if output_router_tuple: hidden, router_tuple = sparse_out else: hidden = sparse_out else: hidden = self.feed_forward(attention_output) outputs = (hidden,) + atten_out[1:] if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple: outputs += (router_tuple,) return outputs class GPTSanJapanesePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTSanJapaneseConfig base_model_prefix = "gptsan_japanese" supports_gradient_checkpointing = False _no_split_modules = ["GPTSanJapaneseBlock"] _skip_keys_device_placement = "past_key_values" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, nn.LayerNorm): module.weight.data.fill_(factor * 1.0) module.bias.data.zero_() elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseModel): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0) module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None: module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseDenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, GPTSanJapaneseAttention): # Multi-headed attention d_model = self.config.d_model 
key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) elif isinstance(module, GPTSanJapaneseSparseMLP): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " "See T5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids GPTSAN_JAPANESE_START_DOCSTRING = r""" The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in General-purpose Swich transformer based Japanese language model This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPTSAN_JAPANESE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are automatically appended. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`: - 1 for tokens that are **prefix** input, - 0 for tokens that are **not-prefix** input. spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`): This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. 
""" @add_start_docstrings( "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.", GPTSAN_JAPANESE_START_DOCSTRING, ) class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel): def __init__(self, config: GPTSanJapaneseConfig): super().__init__(config) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) self.config = copy.deepcopy(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.last_project = nn.Linear(config.d_model, config.d_model, bias=True) self.act = ACT2FN["swish"] self.blocks = torch.nn.ModuleList([]) for _ in range(config.num_switch_layers): self.blocks.append(GPTSanJapaneseBlock(config)) for _ in range(config.num_ext_layers): self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True)) if config.num_ext_layers > 0: self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) if config.d_spout: spouts = [] for _ in range(8): spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False)) spouts.append(nn.Tanh()) spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False)) self.spout = nn.Sequential(*spouts) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, output_router_logits: Optional[bool] = None, num_precontext: Optional[torch.LongTensor] = None, ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" num_precontext (`torch.LongTensor` of shape `(batch_size,1)`): length of `hybrid` input tokens in the input. Tokens up to this length refer to both front and back like BERT, tokens after that refer only to front like GPT. see also: https://github.com/tanreinama/GPTSAN/blob/main/report/model.md Returns: `MoEModelOutputWithPastAndCrossAttentions` or `tuple` if `return_dict` returns MoEModelOutputWithPastAndCrossAttentions insted of tuple """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict device = self.position_embeddings.weight.device if input_ids is None: input_ids = torch.zeros([1, 1]).int().to(device) # dummy for input_ids was None num_pasts_contexts = 0 num_batch = input_ids.shape[0] pasts_or_spout_value = None if past_key_values is not None: num_pasts_contexts = past_key_values[0][0].shape[2] elif self.config.d_spout and spout is not None: # `spout` is a special input vector specific to GPTSAN # This controls the output by projecting embedded information such as the class of sentences during learning. # It should passed instead of the first past_key_value. 
# See the original GPTSAN repository for details num_pasts_contexts += 1 # If there is an attention_mask, increase first one for spout if self.config.d_spout and spout is not None and attention_mask is not None: attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device) attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout attention_mask = attention_mask_with_spout # update attention_mask if num_precontext is not None: # `num_precontext` is the number of tokens that refer to each other in prefix-lm # created per batch, so dimension of num_precontext should be [batch, 1] if not ( len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1 ): # num_precontext Should be [batch,1] raise ValueError("num_precontext should be [batch, 1] size.") num_precontext = torch.reshape(num_precontext, [-1]) else: num_precontext = torch.zeros([num_batch]).int().to(device) num_input_contexts = input_ids.shape[1] num_output_contexts = num_input_contexts + num_pasts_contexts hidden_states = self.embed_tokens(input_ids) if past_key_values is not None: pasts_or_spout_value = past_key_values elif self.config.d_spout and spout is not None: # Make vector from `spout` of GPTSAN to the same shape as past_key_values pasts_or_spout_value = self.spout(spout) # projecting `spout` vector pasts_or_spout_value = torch.reshape( pasts_or_spout_value, [ num_batch, self.config.num_layers, 2, self.config.num_heads, num_pasts_contexts, self.config.d_model // self.config.num_heads, ], ) pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1) # make same shape as past_key_values pasts_or_spout_value = tuple( tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value ) else: pasts_or_spout_value = [None] * self.config.num_layers # Token position considering spout and pasts token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts if attention_mask is None: attention_mask = torch.ones(num_batch, num_input_contexts, device=device) # positions for get position_embeddings gather_position = ( ( torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device) + token_position.unsqueeze(0) ) .transpose(1, 2) .long() ) # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2) gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1) # attention_mask is applied per batch for i in range(num_batch): hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i]) # Create a mask to be used when making the prefix Input length of Prefix-LM variable causal_mask = ( torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8)) .view(1, 1, num_output_contexts, num_output_contexts) .to(device) ) prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :] if token_type_ids is not None: token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2) prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float() # Marge prefix_lm_mask and attention_mask extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2) # Prepare head mask if needed if head_mask is not None: head_mask = self.get_head_mask( head_mask, self.config.num_switch_layers + self.config.num_ext_layers ) # n_layer x batch x 
n_heads x N x N # outputs present_key_value_states = () if self.config.use_cache or use_cache else None all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None all_attentions = () if self.config.output_attentions or output_attentions else None all_router_probs = () if self.config.output_router_logits or output_router_logits else None for layer, past in enumerate(pasts_or_spout_value): if layer == self.config.num_switch_layers: if self.config.num_ext_layers > 0: # extra_position_embeddings are extra position embeddings that are only created when extending the model with code from the original GPTSAN repository. Not used in the default model. # However, it is created when you create an additional layer and partially train only that location. # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading models created in the original GPTSAN repository. for i in range(num_batch): hidden_states[i] += torch.gather( self.extra_position_embeddings.weight, dim=0, index=gather_position[i] ) output_router_tuple = ( self.config.output_router_logits or output_router_logits ) and layer < self.config.num_switch_layers block_output = self.blocks[layer]( hidden_states=hidden_states, past_key_value=past, attention_mask=extended_attention_mask, head_mask=head_mask, use_cache=self.config.use_cache or use_cache, output_attentions=self.config.output_attentions or output_attentions, output_router_tuple=output_router_tuple, ) outpos = 0 hidden_states = block_output[outpos] if self.config.output_hidden_states or output_hidden_states: all_hidden_states += (hidden_states,) if self.config.use_cache or use_cache: outpos += 1 present = block_output[outpos] present_key_value_states += (present,) if self.config.output_attentions or output_attentions: outpos += 1 attention_probs = block_output[outpos] all_attentions += (attention_probs,) if output_router_tuple: outpos += 1 router_tuple = block_output[outpos] all_router_probs.append(router_tuple[0]) hidden_states = self.last_project(hidden_states) hidden_states = self.act(hidden_states) if self.config.output_hidden_states or output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, router_probs=all_router_probs, ) @add_start_docstrings( "The bare GPTSAN-japanese Model with a language modeling head.", GPTSAN_JAPANESE_START_DOCSTRING, ) class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: GPTSanJapaneseConfig): super().__init__(config) self.model = GPTSanJapaneseModel(config) self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size])) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) if not self.config.torchscript: self.lm_head.weight = self.model.embed_tokens.weight @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, 
head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, output_router_logits: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: `MoECausalLMOutputWithPast` or `tuple` if `return_dict` returns MoECausalLMOutputWithPast insted of tuple Example: Text Generation with regular LM Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils >>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("織田信長は、", return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> gen_token = model.generate(input_ids, max_new_tokens=50) >>> tokenizer.decode(gen_token[0]) "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..." ``` Text Generation with Prefix-LM Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils >>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> token_type_ids = x_token.token_type_ids.to(device) >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) >>> tokenizer.decode(gen_token[0]) "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..." ``` Simultaneously Text Generation And Masked Language Model ```python >>> from transformers import AutoModel, AutoTokenizer, trainer_utils >>> device = "cuda" >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。" >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt") >>> trainer_utils.set_seed(30) >>> input_ids = x_token.input_ids.to(device) >>> token_type_ids = x_token.token_type_ids.to(device) >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1) >>> tokenizer.decode(out_mlm_token[0]) "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。" >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :]) "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..." 
```""" SEG_TOKEN = self.config.separator_token_id use_cache = use_cache or self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_return_dict = True num_precontext = None if input_ids is not None: num_batch = input_ids.shape[0] num_precontext = torch.zeros([num_batch]).int().to(input_ids.device) where_separators = torch.where(input_ids == SEG_TOKEN) num_precontext[where_separators[0]] += where_separators[1] num_precontext = num_precontext.unsqueeze(1) outputs = self.model( input_ids, attention_mask, token_type_ids, spout, past_key_values, head_mask, use_cache, inputs_embeds, decoder_inputs_embeds, output_attentions, output_hidden_states, model_return_dict, output_router_logits, num_precontext, ) lm_logits = self.lm_head(outputs[0]) if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]: lm_logits = lm_logits + self.final_logits_bias loss = None z_loss = None router_probs = None aux_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) loss_fct = nn.CrossEntropyLoss(ignore_index=-100) if output_router_logits: # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs) z_loss = router_z_loss_func(router_logits) router_probs = nn.Softmax(dim=-1)(router_logits) aux_loss = load_balancing_loss_func(router_probs, expert_indexes) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: return tuple( v for v in [ loss, lm_logits, outputs.past_key_values, outputs.hidden_states, outputs.router_probs, z_loss, aux_loss, ] if v is not None ) return MoECausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_probs, z_loss=z_loss, aux_loss=aux_loss, ) def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[Union[List, torch.FloatTensor]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, **kwargs, ): if isinstance(spout, list): spout = torch.tensor(spout).float() if input_ids is not None: spout = spout.to(input_ids.device) if past_key_values is not None: return { "input_ids": input_ids[:, -1:] if input_ids is not None else None, "attention_mask": attention_mask, "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None, "spout": spout, "past_key_values": past_key_values, } return { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "spout": spout, "past_key_values": None, } # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return 
new_embeddings # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese def get_output_embeddings(self): return self.lm_head # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if len(router_output[0].shape) > 1: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
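# A minimal sketch of feeding the `spout` conditioning vector to the forward pass defined
# above. The checkpoint and prompt follow the docstring examples earlier in this file; the
# all-zeros spout is only a placeholder assumption, not a recommended value. Wrapped in a
# function so nothing runs on import.
def _illustrative_spout_forward():
    import torch

    from transformers import AutoTokenizer, GPTSanJapaneseForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese")
    inputs = tokenizer("織田信長は、", return_tensors="pt")
    # `spout` must have shape (batch_size, config.d_spout); GPTSanJapaneseModel.forward
    # projects it with `self.spout` and uses it in place of the first past_key_value.
    spout = torch.zeros(inputs.input_ids.shape[0], model.config.d_spout)
    outputs = model(input_ids=inputs.input_ids, spout=spout)
    return outputs.logits[:, -1].argmax(dim=-1)  # greedy next-token ids, shape (batch_size,)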
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gptsan_japanese/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _import_structure = { "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"], "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gptsan_japanese"] = [ "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTSanJapaneseForConditionalGeneration", "GPTSanJapaneseModel", "GPTSanJapanesePreTrainedModel", ] _import_structure["tokenization_gptsan_japanese"] = [ "GPTSanJapaneseTokenizer", ] if TYPE_CHECKING: from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gptsan_japanese import ( GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapanesePreTrainedModel, ) from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
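# With the lazy-module registration above, the torch-dependent modeling classes are only
# imported on first attribute access. A minimal usage sketch (assuming torch is installed);
# importing from the top-level `transformers` package goes through the same machinery:
#
#     from transformers.models.gptsan_japanese import GPTSanJapaneseConfig, GPTSanJapaneseTokenizer
#
#     config = GPTSanJapaneseConfig()  # resolves configuration_gptsan_japanese on demand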
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py
# coding=utf-8 # Copyright 2023, HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GPTSAN-japanese model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "tanreinama/GPTSAN-2.8B-spout_is_uniform": ( "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json" ), } class GPTSanJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 36000): Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTSanJapaneseModel`]. max_position_embeddings (`int`, *optional*, defaults to 1280): The maximum sequence length that this model might ever be used with. Defaults set this to 1280. d_model (`int`, *optional*, defaults to 1024): Size of the encoder layers and the pooler layer. d_ff (`int`, *optional*, defaults to 8192): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. d_ext (`int`, *optional*, defaults to 4096): Size of the intermediate feed forward layer in each Extra-layers. d_spout (`int`, *optional*, defaults to 128): Size of the `spout` vector. num_switch_layers (`int`, *optional*, defaults to 10): Number of layers in the Switch Transformer layer. num_ext_layers (`int`, *optional*, defaults to 0): Number of layers in the Extra-layers. num_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 16): Number of experts for each SwitchTransformer layer. expert_capacity (`int`, *optional*, defaults to 128): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. dropout_rate (`float`, *optional*, defaults to 0.0): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router. router_jitter_noise (`float`, *optional*, defaults to 0.0): Amount of noise to add to the router. Set it to 0.0 during prediction or set small value (usually 1e-2) during training. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. 
It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. output_hidden_states (`bool`, *optional*, default to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. initializer_factor (`float`, *optional*, defaults to 0.002): A factor for initializing all weight matrices. output_router_logits (`bool`, *optional*, default to `False`): Whether or not to return the router logits of all experts. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) """ model_type = "gptsan-japanese" keys_to_ignore_at_inference = [ "past_key_values", ] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.d_ff = d_ff self.d_ext = d_ext self.d_spout = d_spout self.num_switch_layers = num_switch_layers self.num_ext_layers = num_ext_layers self.num_layers = num_switch_layers + num_ext_layers self.num_heads = num_heads self.num_experts = num_experts self.expert_capacity = expert_capacity self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.router_bias = router_bias self.router_jitter_noise = router_jitter_noise self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.output_hidden_states = output_hidden_states self.output_attentions = output_attentions self.initializer_factor = initializer_factor self.output_router_logits = output_router_logits self.use_cache = use_cache super().__init__( separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
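# A short usage sketch for the configuration class above. The values are arbitrary
# illustrations, not recommended settings; note that `num_layers` is derived in `__init__`
# rather than passed directly.
def _illustrative_config():
    from transformers import GPTSanJapaneseConfig

    config = GPTSanJapaneseConfig(num_switch_layers=2, num_ext_layers=1, d_spout=64)
    assert config.num_layers == 3  # num_switch_layers + num_ext_layers
    assert config.hidden_size == config.d_model  # mapped through `attribute_map`
    return config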
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch from transformers.utils import WEIGHTS_NAME DIALOGPT_MODELS = ["small", "medium", "large"] OLD_KEY = "lm_head.decoder.weight" NEW_KEY = "lm_head.weight" def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str): d = torch.load(checkpoint_path) d[NEW_KEY] = d.pop(OLD_KEY) os.makedirs(pytorch_dump_folder_path, exist_ok=True) torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dialogpt_path", default=".", type=str) args = parser.parse_args() for MODEL in DIALOGPT_MODELS: checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") pytorch_dump_folder_path = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
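# Expected on-disk layout and invocation for the conversion loop above. The folder name is
# an arbitrary placeholder; any directory containing the original DialoGPT checkpoints works:
#
#   dialogpt_checkpoints/
#   ├── small_ft.pkl
#   ├── medium_ft.pkl
#   └── large_ft.pkl
#
#   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path dialogpt_checkpoints
#
# Each checkpoint is saved under ./DialoGPT-{small,medium,large}/ with `lm_head.decoder.weight`
# renamed to `lm_head.weight`. A single checkpoint can also be converted directly (paths here
# are placeholders):
def _illustrative_single_conversion():
    convert_dialogpt_checkpoint("dialogpt_checkpoints/medium_ft.pkl", "./DialoGPT-medium")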
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin Transformer + UperNet checkpoints from mmsegmentation. URL: https://github.com/open-mmlab/mmsegmentation/tree/master/configs/swin """ import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def get_upernet_config(model_name): auxiliary_in_channels = 384 window_size = 7 if "tiny" in model_name: embed_dim = 96 depths = (2, 2, 6, 2) num_heads = (3, 6, 12, 24) elif "small" in model_name: embed_dim = 96 depths = (2, 2, 18, 2) num_heads = (3, 6, 12, 24) elif "base" in model_name: embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) window_size = 12 auxiliary_in_channels = 512 elif "large" in model_name: embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) window_size = 12 auxiliary_in_channels = 768 # set label information num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} backbone_config = SwinConfig( embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], ) config = UperNetConfig( backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, ) return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias")) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight")) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index")) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias")) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight")) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight")) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias")) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, backbone_config): num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): dim = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight") in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[ dim : dim * 2, : ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[ dim : dim * 2 ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[ -dim :, : ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :] # fmt: on def correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = 
x.reshape(out_channel, 4, in_channel // 4) x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) return x def reverse_correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = x.reshape(out_channel, in_channel // 4, 4) x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel) return x def correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(4, in_channel // 4) x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) return x # there was an incompatibility with this version, due to a new implementation of their downsampling operation using nn.Unfold. # was resolved as seen here: # https://github.com/open-mmlab/mmdetection/blob/31c84958f54287a8be2b99cbf87a6dcf12e57753/mmdet/models/utils/ckpt_convert.py#L96. def reverse_correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(in_channel // 4, 4) x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel) return x def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): model_name_to_url = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[ "state_dict" ] for name, param in state_dict.items(): print(name, param.shape) config = get_upernet_config(model_name) model = UperNetForSemanticSegmentation(config) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): val = state_dict.pop(key) if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config.backbone_config) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: state_dict[key] = reverse_correct_unfold_reduction_order(value) if "norm" in key: state_dict[key] = reverse_correct_unfold_norm_order(value) model.load_state_dict(state_dict) # verify on image url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = SegformerImageProcessor() pixel_values = processor(image, return_tensors="pt").pixel_values with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print(logits.shape) print("First values of logits:", logits[0, 0, :3, :3]) # assert values if model_name == 
"upernet-swin-tiny": expected_slice = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": expected_slice = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": expected_slice = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": expected_slice = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("Logits:", outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"openmmlab/{model_name}") processor.push_to_hub(f"openmmlab/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvNext + UperNet checkpoints from mmsegmentation.""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def get_upernet_config(model_name): auxiliary_in_channels = 384 if "tiny" in model_name: depths = [3, 3, 9, 3] hidden_sizes = [96, 192, 384, 768] if "small" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [96, 192, 384, 768] if "base" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [128, 256, 512, 1024] auxiliary_in_channels = 512 if "large" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [192, 384, 768, 1536] auxiliary_in_channels = 768 if "xlarge" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [256, 512, 1024, 2048] auxiliary_in_channels = 1024 # set label information num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} backbone_config = ConvNextConfig( depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"] ) config = UperNetConfig( backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, ) return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight")) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias")) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight")) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight")) 
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias")) if i > 0: rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias")) rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias")) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): model_name_to_url = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"] config = get_upernet_config(model_name) model = UperNetForSemanticSegmentation(config) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): val = state_dict.pop(key) if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) model.load_state_dict(state_dict) # verify on image url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = SegformerImageProcessor() pixel_values = processor(image, return_tensors="pt").pixel_values with torch.no_grad(): 
outputs = model(pixel_values) if model_name == "upernet-convnext-tiny": expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": expected_slice = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": expected_slice = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": expected_slice = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": expected_slice = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"openmmlab/{model_name}") processor.push_to_hub(f"openmmlab/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
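# A toy illustration of the state-dict rewriting done in `convert_upernet_checkpoint` above:
# a "bn" -> "batch_norm" substring replacement first, then explicit renames via `rename_key`
# (pop + reinsert). The key names and values below are made up for the sketch.
def _illustrative_state_dict_rename():
    state_dict = {"backbone.downsample_layers.0.1.weight": 1.0, "decode_head.bn.weight": 2.0}
    for key in list(state_dict):
        value = state_dict.pop(key)
        state_dict[key.replace("bn", "batch_norm")] = value
    rename_key(state_dict, "backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight")
    return state_dict  # {'backbone.embeddings.layernorm.weight': 1.0, 'decode_head.batch_norm.weight': 2.0}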
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/upernet/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_upernet"] = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/upernet/modeling_upernet.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch UperNet model. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from .configuration_upernet import UperNetConfig UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring _CONFIG_FOR_DOC = "UperNetConfig" class UperNetConvModule(nn.Module): """ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). """ def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, Tuple[int, int]] = 1, ) -> None: super().__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, ) self.batch_norm = nn.BatchNorm2d(out_channels) self.activation = nn.ReLU() def forward(self, input: torch.Tensor) -> torch.Tensor: output = self.conv(input) output = self.batch_norm(output) output = self.activation(output) return output class UperNetPyramidPoolingBlock(nn.Module): def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None: super().__init__() self.layers = [ nn.AdaptiveAvgPool2d(pool_scale), UperNetConvModule(in_channels, channels, kernel_size=1), ] for i, layer in enumerate(self.layers): self.add_module(str(i), layer) def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class UperNetPyramidPoolingModule(nn.Module): """ Pyramid Pooling Module (PPM) used in PSPNet. Args: pool_scales (`Tuple[int]`): Pooling scales used in Pooling Pyramid Module. in_channels (`int`): Input channels. channels (`int`): Channels after modules, before conv_seg. align_corners (`bool`): align_corners argument of F.interpolate. 
""" def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None: super().__init__() self.pool_scales = pool_scales self.align_corners = align_corners self.in_channels = in_channels self.channels = channels self.blocks = [] for i, pool_scale in enumerate(pool_scales): block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels) self.blocks.append(block) self.add_module(str(i), block) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: ppm_outs = [] for ppm in self.blocks: ppm_out = ppm(x) upsampled_ppm_out = nn.functional.interpolate( ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners ) ppm_outs.append(upsampled_ppm_out) return ppm_outs class UperNetHead(nn.Module): """ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of [UPerNet](https://arxiv.org/abs/1807.10221). """ def __init__(self, config, in_channels): super().__init__() self.config = config self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6) self.in_channels = in_channels self.channels = config.hidden_size self.align_corners = False self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) # PSP Module self.psp_modules = UperNetPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, ) self.bottleneck = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1, ) # FPN Module self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1) fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) self.fpn_bottleneck = UperNetConvModule( len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1, ) def init_weights(self): self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, nn.Conv2d): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def psp_forward(self, inputs): x = inputs[-1] psp_outs = [x] psp_outs.extend(self.psp_modules(x)) psp_outs = torch.cat(psp_outs, dim=1) output = self.bottleneck(psp_outs) return output def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: # build laterals laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)] laterals.append(self.psp_forward(encoder_hidden_states)) # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate( laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners ) # build outputs fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)] # append psp feature fpn_outs.append(laterals[-1]) for i in range(used_backbone_levels - 1, 0, -1): fpn_outs[i] = nn.functional.interpolate( fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners ) fpn_outs = torch.cat(fpn_outs, dim=1) output = self.fpn_bottleneck(fpn_outs) output = self.classifier(output) return output class UperNetFCNHead(nn.Module): """ Fully Convolution Networks for Semantic 
Segmentation. This head is the implementation of [FCNNet](https://arxiv.org/abs/1411.4038>). Args: config: Configuration. in_channels (int): Number of input channels. kernel_size (int): The kernel size for convs in the head. Default: 3. dilation (int): The dilation rate for convs in the head. Default: 1. """ def __init__( self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1 ) -> None: super().__init__() self.config = config self.in_channels = config.auxiliary_in_channels self.channels = config.auxiliary_channels self.num_convs = config.auxiliary_num_convs self.concat_input = config.auxiliary_concat_input self.in_index = in_index conv_padding = (kernel_size // 2) * dilation convs = [] convs.append( UperNetConvModule( self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation ) ) for i in range(self.num_convs - 1): convs.append( UperNetConvModule( self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation ) ) if self.num_convs == 0: self.convs = nn.Identity() else: self.convs = nn.Sequential(*convs) if self.concat_input: self.conv_cat = UperNetConvModule( self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2 ) self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) def init_weights(self): self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, nn.Conv2d): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: # just take the relevant feature maps hidden_states = encoder_hidden_states[self.in_index] output = self.convs(hidden_states) if self.concat_input: output = self.conv_cat(torch.cat([hidden_states, output], dim=1)) output = self.classifier(output) return output class UperNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = UperNetConfig main_input_name = "pixel_values" def _init_weights(self, module): if isinstance(module, UperNetPreTrainedModel): module.backbone.init_weights() module.decode_head.init_weights() if module.auxiliary_head is not None: module.auxiliary_head.init_weights() def init_weights(self): """Initialize the weights""" self.backbone.init_weights() self.decode_head.init_weights() if self.auxiliary_head is not None: self.auxiliary_head.init_weights() UPERNET_START_DOCSTRING = r""" Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UPERNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", UPERNET_START_DOCSTRING, ) class UperNetForSemanticSegmentation(UperNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = AutoBackbone.from_config(config.backbone_config) # Semantic segmentation head(s) self.decode_head = UperNetHead(config, in_channels=self.backbone.channels) self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") >>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset" ... 
) >>> image = Image.open(filepath).convert("RGB") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits # shape (batch_size, num_labels, height, width) >>> list(logits.shape) [1, 150, 512, 512] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) features = outputs.feature_maps logits = self.decode_head(features) logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(features) auxiliary_logits = nn.functional.interpolate( auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False ) loss = None if labels is not None: if self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") else: # compute weighted loss loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index) loss = loss_fct(logits, labels) if auxiliary_logits is not None: auxiliary_loss = loss_fct(auxiliary_logits, labels) loss += self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
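# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of turning the per-pixel logits returned above
# (shape `(batch_size, num_labels, height, width)`, already upsampled to the
# input resolution) into a discrete segmentation map. It reuses the checkpoint
# and fixture image from the docstring example; the `argmax` post-processing is
# an assumption about typical usage, not part of this module.
if __name__ == "__main__":
    import torch
    from huggingface_hub import hf_hub_download
    from PIL import Image

    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset"
    )
    image = Image.open(filepath).convert("RGB")
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, H, W)

    # one predicted class id per pixel
    segmentation_map = logits.argmax(dim=1)[0]
    print(segmentation_map.shape)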
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/upernet/configuration_upernet.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UperNet model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class UperNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UperNet [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. hidden_size (`int`, *optional*, defaults to 512): The number of hidden units in the convolutional layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function. Examples: ```python >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation >>> # Initializing a configuration >>> configuration = UperNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = UperNetForSemanticSegmentation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "upernet" def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.backbone_config = backbone_config self.hidden_size = hidden_size self.initializer_range = initializer_range self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_in_channels = auxiliary_in_channels self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index
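# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of the `backbone_config` handling in `__init__`
# above: `None` falls back to the default ResNet backbone, a config object is
# used as-is, and a plain dict is resolved through `CONFIG_MAPPING` via its
# "model_type" entry. ConvNeXt is used here only as an example backbone.
if __name__ == "__main__":
    from transformers import ConvNextConfig, UperNetConfig

    # default: ResNet backbone exposing all four stages
    default_config = UperNetConfig()
    print(default_config.backbone_config.model_type)  # "resnet"

    # explicit backbone config object
    convnext_backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config_from_object = UperNetConfig(backbone_config=convnext_backbone)

    # dict form, e.g. as it appears in a serialized config.json
    config_from_dict = UperNetConfig(backbone_config=convnext_backbone.to_dict())
    print(config_from_dict.backbone_config.model_type)  # "convnext"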
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/nllb/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_nllb"] = ["NllbTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
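# --- Illustrative usage sketch (not part of the original file) ---
# A hedged sketch of what the lazy/optional-dependency wiring above means for
# callers: the tokenizer classes are only resolved on first attribute access,
# and only exist when the corresponding optional dependency (`tokenizers` for
# the fast class, `sentencepiece` for the slow one) is installed.
if __name__ == "__main__":
    from transformers.utils import is_sentencepiece_available, is_tokenizers_available

    if is_tokenizers_available():
        # resolved lazily through the `_LazyModule` registered above
        from transformers import NllbTokenizerFast as ExampleTokenizer
    elif is_sentencepiece_available():
        from transformers import NllbTokenizer as ExampleTokenizer
    else:
        raise ImportError("Neither `tokenizers` nor `sentencepiece` is installed.")

    tokenizer = ExampleTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
    print(type(tokenizer).__name__)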
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/nllb/tokenization_nllb_fast.py
# coding=utf-8 # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: NllbTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/nllb-large-en-ro": 1024, "facebook/nllb-200-distilled-600M": 1024, } FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 
'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip class NllbTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizerFast >>> tokenizer = NllbTokenizerFast.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. 
src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. """ vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = NllbTokenizer prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = ( AddedToken(mask_token, normalized=True, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token ) self.legacy_behaviour = legacy_behaviour _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, ) self.vocab_file = vocab_file self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "eng_Latn" self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(src_lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. 
- In default mode: Prefix=[tgt_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
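# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of the language-specific special tokens configured
# by `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens` above. In
# the default (non-legacy) mode the source text is encoded as
# `[src_lang_code] <tokens> </s>`, and generation is steered to the target
# language by forcing its code as the first generated token. The seq2seq model
# class used for generation is an assumption about typical usage.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

    inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # prefix is the source language code, suffix is </s>
    print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())[:2])

    generated = model.generate(
        **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"], max_new_tokens=40
    )
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])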
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/nllb/tokenization_nllb.py
# coding=utf-8 # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/nllb-200-distilled-600M": 1024, } FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 
'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip class NllbTokenizer(PreTrainedTokenizer): """ Construct an NLLB tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizer >>> tokenizer = NllbTokenizer.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. sp_model_kwargs (`Dict[str, str]`): Additional keyword arguments to pass to the model initialization. 
""" vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = ( AddedToken(mask_token, normalized=True, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token ) self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.legacy_behaviour = legacy_behaviour self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} self._src_lang = src_lang if src_lang is not None else "eng_Latn" self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] _additional_special_tokens = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs, ) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def 
_switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] """ self.cur_lang_code = self.lang_code_to_id[src_lang] if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. - In default mode: Prefix=[tgt_lang_code], suffix = [eos] """ self.cur_lang_code = self.lang_code_to_id[lang] if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id]
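# --- Illustrative sketch of the fairseq/spm alignment (not part of the original file) ---
# A hedged check of the vocabulary layout documented in `__init__` above:
# `<s>`, `<pad>`, `</s>`, `<unk>` keep the hard-coded fairseq ids 0-3, every
# regular SentencePiece piece is shifted by `fairseq_offset` (= 1), and the
# language codes are appended after the SentencePiece vocabulary.
if __name__ == "__main__":
    from transformers import NllbTokenizer

    tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

    # hard-coded fairseq positions for the special tokens
    print(tokenizer.convert_tokens_to_ids("<pad>"), tokenizer.convert_tokens_to_ids("</s>"))  # 1 2

    # a regular piece is its SentencePiece id plus the offset of 1
    piece = tokenizer.tokenize("hello")[0]
    print(tokenizer.convert_tokens_to_ids(piece) == tokenizer.sp_model.PieceToId(piece) + tokenizer.fairseq_offset)

    # language codes live after the SentencePiece vocabulary
    print(tokenizer.lang_code_to_id["eng_Latn"] >= tokenizer.sp_model_size)  # True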
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Hubert checkpoint.""" import argparse import torch from transformers import ( Wav2Vec2FeatureExtractor, WavLMConfig, WavLMForAudioFrameClassification, WavLMForSequenceClassification, WavLMForXVector, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) def convert_classification(base_model_name, hf_config, downstream_dict): model = WavLMForSequenceClassification.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["projector.weight"] model.projector.bias.data = downstream_dict["projector.bias"] model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"] model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"] return model def convert_diarization(base_model_name, hf_config, downstream_dict): model = WavLMForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config) model.classifier.weight.data = downstream_dict["model.linear.weight"] model.classifier.bias.data = downstream_dict["model.linear.bias"] return model def convert_xvector(base_model_name, hf_config, downstream_dict): model = WavLMForXVector.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["connector.weight"] model.projector.bias.data = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel): model.tdnn[i].kernel.weight.data = downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] model.objective.weight.data = downstream_dict["objective.W"] return model @torch.no_grad() def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path): """ Copy/paste/tweak model's weights to transformers design. 
""" checkpoint = torch.load(checkpoint_path, map_location="cpu") downstream_dict = checkpoint["Downstream"] hf_config = WavLMConfig.from_pretrained(config_path) hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( base_model_name, return_attention_mask=True, do_normalize=False ) arch = hf_config.architectures[0] if arch.endswith("ForSequenceClassification"): hf_model = convert_classification(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForAudioFrameClassification"): hf_model = convert_diarization(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForXVector"): hf_model = convert_xvector(base_model_name, hf_config, downstream_dict) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}") if hf_config.use_weighted_layer_sum: hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(model_dump_path) hf_model.save_pretrained(model_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") args = parser.parse_args() convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/wavlm/modeling_wavlm.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch WavLM model.""" import math import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wavlm import WavLMConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "WavLMConfig" # Base docstring _CHECKPOINT_FOR_DOC = "patrickvonplaten/wavlm-libri-clean-100h-base-plus" _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] # CTC docstring _CTC_EXPECTED_OUTPUT = "'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'" _CTC_EXPECTED_LOSS = 12.51 # Frame class docstring _FRAME_CLASS_CHECKPOINT = "microsoft/wavlm-base-plus-sd" _FRAME_EXPECTED_OUTPUT = [0, 0] # Speaker Verification docstring _XVECTOR_CHECKPOINT = "microsoft/wavlm-base-plus-sv" _XVECTOR_EXPECTED_OUTPUT = 0.97 WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/wavlm-base", "microsoft/wavlm-base-plus", "microsoft/wavlm-large", # See all WavLM models at https://huggingface.co/models?filter=wavlm ] # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM class WavLMNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM class WavLMLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM class WavLMGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] 
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM class WavLMPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM class WavLMSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM class WavLMFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [ WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( conv_layer.__call__, hidden_states, ) else: hidden_states = conv_layer(hidden_states) return hidden_states class WavLMFeatureExtractor(WavLMFeatureEncoder): def __init__(self, config): 
super().__init__(config) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. " f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM class WavLMFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class WavLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, num_buckets: int = 320, max_distance: int = 800, has_relative_position_bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.out_proj = nn.Linear(embed_dim, embed_dim) self.num_buckets = num_buckets self.max_distance = max_distance self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1)) self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8) if has_relative_position_bias: self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, index=0, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Attention layer with relative attention""" bsz, tgt_len, _ = hidden_states.size() # first pass of attention layer creates position bias if position_bias is None: position_bias = self.compute_bias(tgt_len, tgt_len) position_bias = ( position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len) ) # Compute relative position bias: # 1) get reshape hidden_states gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1)) gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3) # 2) project hidden states relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states) relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1) # 3) compute gate for position bias from projected hidden states gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1) gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0 # 4) apply gate to position bias to compute gated position_bias gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len)) attn_output, attn_weights = self.torch_multi_head_self_attention( hidden_states, attention_mask, gated_position_bias, 
output_attentions ) return attn_output, attn_weights, position_bias def torch_multi_head_self_attention( self, hidden_states: torch.FloatTensor, attention_mask: Union[torch.LongTensor, torch.BoolTensor], gated_position_bias: torch.FloatTensor, output_attentions: bool, ) -> (torch.FloatTensor, torch.FloatTensor): """simple wrapper around torch's multi_head_attention_forward function""" # self-attention assumes q = k = v query = key = value = hidden_states.transpose(0, 1) key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None # disable bias and add_zero_attn bias_k = bias_v = None add_zero_attn = False # PyTorch 1.3.0 has F.multi_head_attention_forward defined # so no problem with backwards compatibility attn_output, attn_weights = F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), bias_k, bias_v, add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, output_attentions, gated_position_bias, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...] attn_output = attn_output.transpose(0, 1) if attn_weights is not None: # IMPORTANT: Attention weights are averaged weights # here which should not be the case. This is an open issue # on PyTorch: https://github.com/pytorch/pytorch/issues/32590 attn_weights = attn_weights[:, None].broadcast_to( attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:] ) return attn_output, attn_weights def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor: context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket(relative_position) relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device) values = self.rel_attn_embed(relative_position_bucket) values = values.permute([2, 0, 1]) return values def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor: num_buckets = self.num_buckets // 2 relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_positions_if_large = torch.log(relative_positions.float() / max_exact) relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact) relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact) relative_position_if_large = (max_exact + relative_positions_if_large).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large) return relative_buckets # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM class WavLMFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): 
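            # `config.hidden_act` may be given either as a string key into ACT2FN (e.g. "gelu") or as a callable activation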
self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class WavLMEncoderLayer(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0): attn_residual = hidden_states hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=index, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [WavLMEncoderLayer(config, 
has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 hidden_states[~attention_mask] = 0.0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, position_bias, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=i, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class WavLMEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [ WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to hidden_states[~attention_mask] = 0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in 
generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, position_bias, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, position_bias=position_bias, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) class WavLMGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible" f" by `config.num_codevector_groups` {self.num_groups} " "for concatenation." ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True) codevector_probs = codevector_probs.type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity # Copied from 
transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM class WavLMAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop def forward(self, hidden_states): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) for layer in self.layers: layerdrop_prob = np.random.random() if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM class WavLMAdapterLayer(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.output_hidden_size, 2 * config.output_hidden_size, config.adapter_kernel_size, stride=config.adapter_stride, padding=1, ) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) return hidden_states class WavLMPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = WavLMConfig base_model_prefix = "wavlm" main_input_name = "input_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, WavLMGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, WavLMPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, WavLMFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, 
stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask WAVLM_START_DOCSTRING = r""" WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Xiangzhan Yu, Furu Wei. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`WavLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAVLM_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. 
For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput class WavLMModel(WavLMPreTrainedModel): def __init__(self, config: WavLMConfig): super().__init__(config) self.config = config self.feature_extractor = WavLMFeatureEncoder(config) self.feature_projection = WavLMFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = WavLMEncoderStableLayerNorm(config) else: self.encoder = WavLMEncoder(config) self.adapter = WavLMAdapter(config) if config.add_adapter else None # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.feature_extractor._freeze_parameters() def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
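        Masking is skipped entirely when `config.apply_spec_augment` is `False`; otherwise the time- and
        feature-axis masks are only sampled while the model is in training mode (unless `mask_time_indices`
        is passed explicitly).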
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """WavLM Model with a `language modeling` head on top for Connectionist 
Temporal Classification (CTC).""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForCTC(WavLMPreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.wavlm = WavLMModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to # correctly load adapter layers for WavLM so that we do not have to introduce a new API to # [`PreTrainedModel`]. While slightly hacky, WavLM never has to tie input and output embeddings, so that it is # ok to repurpose this function here. target_lang = self.target_lang if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None: raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.") elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
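        Note that this also freezes the feature encoder, so a separate call to `freeze_feature_encoder` is not needed.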
""" for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. 
""", WAVLM_START_DOCSTRING, ) class WavLMForSequenceClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->wavlm def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->wavlm def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->WavLM, wav2vec2->wavlm def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ WavLM Model with a frame classification head on top for tasks like Speaker Diarization. """, WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForAudioFrameClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_FRAME_CLASS_CHECKPOINT, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_FRAME_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states): hidden_states = hidden_states.unsqueeze(1) hidden_states = nn.functional.unfold( hidden_states, 
(self.kernel_size, self.in_conv_dim), stride=(1, self.in_conv_dim), dilation=(self.dilation, 1), ) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.kernel(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification. """, WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForXVector(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_XVECTOR_CHECKPOINT, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_XVECTOR_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
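

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): run a
# random 1-second, 16 kHz waveform through the bare WavLM encoder. It assumes
# the `microsoft/wavlm-base` checkpoint referenced in the WavLM documentation;
# any other WavLM checkpoint would work the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = WavLMModel.from_pretrained("microsoft/wavlm-base")
    model.eval()

    # (batch, samples) of raw audio; real inputs would come from a feature extractor / processor
    dummy_waveform = torch.randn(1, 16_000)

    with torch.no_grad():
        outputs = model(dummy_waveform)

    # roughly (1, 49, 768) for the base checkpoint: ~49 frames of 768-dim features
    print(outputs.last_hidden_state.shape)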
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/wavlm/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_wavlm"] = [ "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", "WavLMForAudioFrameClassification", "WavLMForCTC", "WavLMForSequenceClassification", "WavLMForXVector", "WavLMModel", "WavLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
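
# Usage note (illustrative, not part of the original file): thanks to the lazy
# module set up above, the public classes are normally imported from the
# top-level package rather than from this sub-package, e.g.
#
#     from transformers import WavLMConfig, WavLMModel
#     model = WavLMModel(WavLMConfig())
#
# `modeling_wavlm` itself is only imported on first attribute access, which
# keeps `import transformers` cheap when the torch-heavy code is not needed.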
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/wavlm/configuration_wavlm.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ WavLM model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class WavLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate an WavLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the WavLM [microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`WavLMModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`WavLMModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`WavLMForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in the feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder. feat_extract_activation (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of the 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Probability of each feature vector along the time axis to be chosen as the start of the vector span to be masked. Approximately `mask_time_prob * sequence_length // mask_time_length` feature vectors will be masked along the time axis. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_time_length` generated along the time axis, each time step, irrespective of `mask_time_prob`. 
Only relevant if `mask_time_prob * len(time_axis) / mask_time_length < mask_time_min_masks`. mask_feature_prob (`float`, *optional*, defaults to 0.0): Probability of each feature vector along the feature axis to be chosen as the start of the vector span to be masked. Approximately `mask_feature_prob * hidden_size // mask_feature_length` feature vectors will be masked along the feature axis. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`float`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`WavLMForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`WavLMForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`WavLMForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional network should be stacked on top of the WavLM encoder. Can be very useful for warm-starting WavLM for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. 
adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 3): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant if `add_adapter is True`. Example: ```python >>> from transformers import WavLMConfig, WavLMModel >>> # Initializing a WavLM microsoft/wavlm-base style configuration >>> configuration = WavLMConfig() >>> # Initializing a model (with random weights) from the microsoft/wavlm-base style configuration >>> model = WavLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wavlm" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_buckets = num_buckets self.max_bucket_distance = max_bucket_distance self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.num_ctc_classes = num_ctc_classes self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = 
use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.output_hidden_size = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
0
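As a quick illustration of the derived attributes defined in `configuration_wavlm.py` above: the default convolutional front-end has seven layers with strides `(5, 2, 2, 2, 2, 2, 2)`, so `inputs_to_logits_ratio` works out to 5 * 2**6 = 320 raw audio samples per output frame. A small sketch, assuming a standard `transformers` installation:

```python
from transformers import WavLMConfig

config = WavLMConfig()
print(config.num_feat_extract_layers)  # 7, i.e. len(config.conv_dim)
print(config.inputs_to_logits_ratio)   # 320, the product of config.conv_stride

# Mismatched conv_dim / conv_stride / conv_kernel lengths trigger the ValueError
# enforced in __init__ above.
try:
    WavLMConfig(conv_stride=(5, 2, 2))
except ValueError as err:
    print(err)
```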
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert WavLM checkpoint.""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: weight_type = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) @torch.no_grad() def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): # load the pre-trained checkpoints checkpoint = torch.load(checkpoint_path) cfg = WavLMConfigOrig(checkpoint["cfg"]) model = WavLMOrig(cfg) model.load_state_dict(checkpoint["model"]) model.eval() if config_path is not None: config = WavLMConfig.from_pretrained(config_path) else: config = WavLMConfig() hf_wavlm = WavLMModel(config) recursively_load_weights(model, hf_wavlm) hf_wavlm.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") args = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
0
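The conversion entry point shown above can also be driven directly from Python rather than via the CLI flags. A rough sketch, assuming the microsoft/unilm checkout described in the script's comments is in place, the script's directory is on `sys.path`, and using hypothetical local paths:

```python
# Hypothetical paths; the .pt checkpoint comes from the microsoft/unilm WavLM release.
from convert_wavlm_original_pytorch_checkpoint_to_pytorch import convert_wavlm_checkpoint

convert_wavlm_checkpoint(
    checkpoint_path="WavLM-Base.pt",             # original fairseq-style checkpoint
    pytorch_dump_folder_path="./wavlm-base-hf",  # where save_pretrained() writes the HF model
    config_path=None,                            # None -> fall back to the default WavLMConfig
)
```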
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/image_processing_deta.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import pathlib from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_annotations, ) from ...utils import ( is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_torchvision_available, is_vision_available, logging, ) from ...utils.generic import TensorType if is_torch_available(): import torch if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_vision_available(): import PIL logger = logging.get_logger(__name__) # pylint: disable=invalid-name SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: input_image (`np.ndarray`): The image to resize. 
size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> List[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
""" input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by DETA. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] # Converting the filtered keypoints list to a numpy array keypoints = np.asarray(keypoints, dtype=np.float32) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ 
Compute the bounding boxes around the provided panoptic segmentation masks. Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> Dict: """ Prepare a coco panoptic annotation for DETA. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. 
""" ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation class DetaImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into DETA model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format, ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): logger.warning_once( "The `prepare` method is deprecated and will be removed in a v4.33. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. 
") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ") return prepare_coco_panoptic_annotation(*args, **kwargs) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): The desired output size. Can contain keys `shortest_edge` and `longest_edge` or `height` and `width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size( image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format ) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def preprocess( self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotationFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`List[Dict]` or `List[List[Dict]]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. 
image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ if "pad_and_return_pixel_mask" in kwargs: logger.warning_once( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", ) do_pad = kwargs.pop("pad_and_return_pixel_mask") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if not is_batched(images): images = [images] annotations = [annotations] if annotations is not None else None if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if ( masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format, ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize( image, size=size, resample=resample, input_data_format=input_data_format ) resized_annotation = self.resize_annotation( target, orig_size, get_image_size(resized_image, input_data_format) ) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [ self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad( images, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format ) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, nms_threshold: float = 0.7, ): """ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. 
If left to None, predictions will not be resized. nms_threshold (`float`, *optional*, defaults to 0.7): NMS threshold. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes batch_size, num_queries, num_labels = out_logits.shape if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device) all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device) all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor") all_labels = all_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for b in range(batch_size): box = boxes[b] score = all_scores[b] lbls = all_labels[b] pre_topk = score.topk(min(10000, num_queries * num_labels)).indices box = box[pre_topk] score = score[pre_topk] lbls = lbls[pre_topk] # apply NMS keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100] score = score[keep_inds] lbls = lbls[keep_inds] box = box[keep_inds] results.append( { "scores": score[score > threshold], "labels": lbls[score > threshold], "boxes": box[score > threshold], } ) return results
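# Hedged usage sketch, not invoked by the library: it illustrates how the
# processor call (`preprocess`) and `post_process_object_detection` above are
# typically chained. The checkpoint name matches the published DETA weights; the
# image URL, threshold and printing loop are illustrative assumptions rather
# than values defined in this module.
def _demo_detection_postprocessing():
    import requests
    import torch
    from PIL import Image

    from transformers import DetaForObjectDetection, DetaImageProcessor

    checkpoint = "jozhang97/deta-swin-large-o365"
    processor = DetaImageProcessor.from_pretrained(checkpoint)
    model = DetaForObjectDetection.from_pretrained(checkpoint)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # target_sizes expects (height, width) per image; PIL's `size` is (width, height)
    results = processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=[image.size[::-1]]
    )[0]
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        print(f"{model.config.id2label[label.item()]}: {round(score.item(), 3)} {box.tolist()}")
    return results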
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/convert_deta_swin_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DETA checkpoints from the original repository. URL: https://github.com/jozhang97/DETA/tree/master""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_deta_config(model_name): backbone_config = SwinConfig( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], ) config = DetaConfig( backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, ) # set labels repo_id = "huggingface/label-files" if "o365" in model_name: num_labels = 366 filename = "object365-id2label.json" else: num_labels = 91 filename = "coco-detection-id2label.json" config.num_labels = num_labels id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias")) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight")) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight")) 
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias")) if i < 3: rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight")) rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias")) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight")) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias")) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight")) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias")) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight")) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias")) # transformer encoder for i in range(config.encoder_layers): rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight")) 
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias")) # transformer decoder for i in range(config.decoder_layers): rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias")) # fmt: on return rename_keys def 
rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_swin_q_k_v(state_dict, backbone_config): num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): dim = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :] state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim] state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[ dim : dim * 2, : ] state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[ dim : dim * 2 ] state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[ -dim :, : ] state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :] # fmt: on def read_in_decoder_q_k_v(state_dict, config): # transformer decoder self-attention layers hidden_size = config.d_model for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DETA structure. 
""" # load config config = get_deta_config(model_name) # load original state dict if model_name == "deta-swin-large": checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth") elif model_name == "deta-swin-large-o365": checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth") else: raise ValueError(f"Model name {model_name} not supported") state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] # original state dict for name, param in state_dict.items(): print(name, param.shape) # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_swin_q_k_v(state_dict, config.backbone_config) read_in_decoder_q_k_v(state_dict, config) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: val = state_dict.pop(key) state_dict[key.replace("transformer.decoder", "model.decoder")] = val if "input_proj" in key: val = state_dict.pop(key) state_dict["model." + key] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: val = state_dict.pop(key) state_dict[key.replace("transformer", "model")] = val # finally, create HuggingFace model and load state dict model = DetaForObjectDetection(config) model.load_state_dict(state_dict) model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) # load image processor processor = DetaImageProcessor(format="coco_detection") # verify our conversion on image img = prepare_img() encoding = processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values.to(device)) # verify logits print("Logits:", outputs.logits[0, :3, :3]) print("Boxes:", outputs.pred_boxes[0, :3, :3]) if model_name == "deta-swin-large": expected_logits = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]]) elif model_name == "deta-swin-large-o365": expected_logits = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]]) assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) print("Everything ok!") if pytorch_dump_folder_path: # Save model and processor logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) # Push to hub if push_to_hub: print("Pushing model and processor to hub...") model.push_to_hub(f"jozhang97/{model_name}") processor.push_to_hub(f"jozhang97/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-swin-large", choices=["deta-swin-large", "deta-swin-large-o365"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push 
the converted model to the 🤗 hub." ) args = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
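# Hedged sketch of the fused q/k/v split used above: `read_in_swin_q_k_v` and
# `read_in_decoder_q_k_v` assume the original checkpoint stores each attention
# input projection as a single (3 * dim, dim) matrix laid out as [query; key;
# value]. The helper below demonstrates that slicing convention on random
# tensors; it is illustrative only and is not called by the conversion flow.
def _demo_split_fused_qkv(dim: int = 8):
    fused_weight = torch.randn(3 * dim, dim)
    fused_bias = torch.randn(3 * dim)
    query_w, key_w, value_w = fused_weight[:dim], fused_weight[dim : dim * 2], fused_weight[-dim:]
    query_b, key_b, value_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]
    # the three slices tile the fused parameters exactly, in q, k, v order
    assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_weight)
    assert torch.equal(torch.cat([query_b, key_b, value_b], dim=0), fused_bias)
    return (query_w, query_b), (key_w, key_b), (value_w, value_b)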
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/convert_deta_resnet_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DETA checkpoints from the original repository. URL: https://github.com/jozhang97/DETA/tree/master""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_deta_config(): config = DetaConfig( num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, ) # set labels config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "model.backbone.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "model.backbone.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "model.backbone.model.embedder.embedder.normalization.bias")) rename_keys.append(("backbone.0.body.bn1.running_mean", "model.backbone.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "model.backbone.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) 
# 3 convs for i in range(3): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # transformer encoder for i in range(config.encoder_layers): rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias")) # transformer decoder for i in range(config.decoder_layers): rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias")) 
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_decoder_q_k_v(state_dict, config): # transformer decoder self-attention layers hidden_size = config.d_model for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:] # We will verify our results on an image 
of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DETA structure. """ # load config config = get_deta_config() # load original state dict if model_name == "deta-resnet-50": filename = "adet_checkpoint0011.pth" elif model_name == "deta-resnet-50-24-epochs": filename = "adet_2x_checkpoint0023.pth" else: raise ValueError(f"Model name {model_name} not supported") checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename=filename) state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_decoder_q_k_v(state_dict, config) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: val = state_dict.pop(key) state_dict[key.replace("transformer.decoder", "model.decoder")] = val if "input_proj" in key: val = state_dict.pop(key) state_dict["model." + key] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: val = state_dict.pop(key) state_dict[key.replace("transformer", "model")] = val # finally, create HuggingFace model and load state dict model = DetaForObjectDetection(config) model.load_state_dict(state_dict) model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) # load image processor processor = DetaImageProcessor(format="coco_detection") # verify our conversion on image img = prepare_img() encoding = processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values.to(device)) # verify logits if model_name == "deta-resnet-50": expected_logits = torch.tensor( [[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]] ) expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.5490, 0.2765, 0.0570]]) elif model_name == "deta-resnet-50-24-epochs": expected_logits = torch.tensor( [[-7.1688, -2.4857, -4.8669], [-7.8630, -3.8154, -4.2674], [-7.2730, -4.1865, -5.5323]] ) expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]]) assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) print("Everything ok!") if pytorch_dump_folder_path: # Save model and processor logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) # Push to hub if push_to_hub: print("Pushing model and processor to hub...") model.push_to_hub(f"jozhang97/{model_name}") processor.push_to_hub(f"jozhang97/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-resnet-50", choices=["deta-resnet-50", "deta-resnet-50-24-epochs"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", 
action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_deta"] = ["DetaImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_deta"] = [ "DETA_PRETRAINED_MODEL_ARCHIVE_LIST", "DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_deta import DetaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deta import ( DETA_PRETRAINED_MODEL_ARCHIVE_LIST, DetaForObjectDetection, DetaModel, DetaPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
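# Usage note: with the lazy structure above, downstream code imports DETA symbols
# from the top-level package, and which ones resolve to real classes depends on
# the optional backends installed (vision for the image processor, torch for the
# modeling classes):
#
#     from transformers import DetaConfig                         # always exported
#     from transformers import DetaImageProcessor                 # needs vision
#     from transformers import DetaModel, DetaForObjectDetection  # needs torch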
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/configuration_deta.py
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DETA model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class DetaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DETA [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. num_queries (`int`, *optional*, defaults to 900): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. num_feature_levels (`int`, *optional*, defaults to 5): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of DETA, which are further fed into the decoder for iterative bounding box refinement. two_stage_num_proposals (`int`, *optional*, defaults to 300): The number of region proposals to be generated, in case `two_stage` is set to `True`. with_box_refine (`bool`, *optional*, defaults to `True`): Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes based on the predictions from the previous layer. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. assign_first_stage (`bool`, *optional*, defaults to `True`): Whether to assign each prediction i to the highest overlapping ground truth object if the overlap is larger than a threshold 0.7. assign_second_stage (`bool`, *optional*, defaults to `True`): Whether to assign second assignment procedure in the second stage closely follows the first stage assignment procedure. 
Examples: ```python >>> from transformers import DetaConfig, DetaModel >>> # Initializing a DETA SenseTime/deformable-detr style configuration >>> configuration = DetaConfig() >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration >>> model = DetaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "deta" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, assign_second_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"]) else: if isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.backbone_config = backbone_config self.num_queries = num_queries self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage self.two_stage_num_proposals = two_stage_num_proposals self.with_box_refine = with_box_refine self.assign_first_stage = assign_first_stage self.assign_second_stage = assign_second_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True.") # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient self.focal_alpha = focal_alpha super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def 
num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model
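# Hedged sketch of custom backbone configuration: `backbone_config` accepts either
# a config object or a plain dict carrying a "model_type" key (re-instantiated
# through CONFIG_MAPPING above). The values below are illustrative, not
# recommended settings, and the helper is not called anywhere in the library.
def _demo_custom_backbone_config():
    from transformers import SwinConfig

    # pass a backbone config object directly ...
    swin = SwinConfig(out_features=["stage2", "stage3", "stage4"])
    config = DetaConfig(backbone_config=swin, num_queries=900)

    # ... or an equivalent dict; "model_type" selects the config class
    config_from_dict = DetaConfig(
        backbone_config={"model_type": "swin", "out_features": ["stage2", "stage3", "stage4"]}
    )

    # the aliases exposed via `attribute_map` and the properties above
    assert config.hidden_size == config.d_model
    assert config.num_attention_heads == config.encoder_attention_heads
    return config, config_from_dict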
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/deta/modeling_deta.py
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DETA model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_vision_available, replace_return_docstrings, ) from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import is_accelerate_available, is_torchvision_available, logging, requires_backends from ..auto import AutoBackbone from .configuration_deta import DetaConfig logger = logging.get_logger(__name__) if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_vision_available(): from transformers.image_transforms import center_to_corners_format if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_scipy_available(): from scipy.optimize import linear_sum_assignment logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DetaConfig" _CHECKPOINT_FOR_DOC = "jozhang97/deta-swin-large-o365" DETA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "jozhang97/deta-swin-large-o365", # See all DETA models at https://huggingface.co/models?filter=deta ] @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta class DetaDecoderOutput(ModelOutput): """ Base class for outputs of the DetaDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DetaModelOutput(ModelOutput): """ Base class for outputs of the Deformable DETR encoder-decoder model. Args: init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. output_proposals (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`): Logits of proposal bounding boxes coordinates in the gen_encoder_output_proposals. """ init_reference_points: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None output_proposals: Optional[torch.FloatTensor] = None @dataclass class DetaObjectDetectionOutput(ModelOutput): """ Output type of [`DetaForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DetaProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. 
        auxiliary_outputs (`list[Dict]`, *optional*):
            Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
            and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
            `pred_boxes`) for each decoder layer.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer
            plus the initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries,
            num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the
            weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute
            the weighted average in the cross-attention heads.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
            layer plus the initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4,
            4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average
            in the self-attention heads.
        intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
            Stacked intermediate hidden states (output of each layer of the decoder).
        intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
            Stacked intermediate reference points (reference points of each layer of the decoder).
        init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            Initial reference points sent through the Transformer decoder.
        enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
            picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
            foreground and background).
        enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Logits of predicted bounding boxes coordinates in the first stage.
        output_proposals (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
            Logits of proposal bounding boxes coordinates in the gen_encoder_output_proposals.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
    output_proposals: Optional[torch.FloatTensor] = None


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->Deta
class DetaFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than
    torchvision.models.resnet[18,34,50,101] produce nans.
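
    Since the statistics and affine parameters are fixed, the forward pass reduces to the affine map
    `x * scale + bias`, where `scale = weight * (running_var + epsilon).rsqrt()` and
    `bias = bias - running_mean * scale` are computed from the registered buffers (see `forward` below).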
""" def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->Deta def replace_batch_norm(model): r""" Recursively replace all `torch.nn.BatchNorm2d` with `DetaFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = DetaFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device("meta"): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) class DetaBackboneWithPositionalEncodings(nn.Module): """ Backbone model with positional embeddings. nn.BatchNorm2d layers are replaced by DetaFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() backbone = AutoBackbone.from_config(config.backbone_config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.channels # TODO fix this if config.backbone_config.model_type == "resnet": for name, parameter in self.model.named_parameters(): if "stages.1" not in name and "stages.2" not in name and "stages.3" not in name: parameter.requires_grad_(False) self.position_embedding = build_position_encoding(config) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): """ Outputs feature maps of latter stages C_3 through C_5 in ResNet if `config.num_feature_levels > 1`, otherwise outputs feature maps of C_5. 
""" # first, send pixel_values through the backbone to get list of feature maps features = self.model(pixel_values).feature_maps # next, create position embeddings out = [] pos = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] position_embeddings = self.position_embedding(feature_map, mask).to(feature_map.dtype) out.append((feature_map, mask)) pos.append(position_embeddings) return out, pos # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrSinePositionEmbedding with DeformableDetr->Deta class DetaSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: eps = 1e-6 y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding class DetaLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. 
""" def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->Deta def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = DetaSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = DetaLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class DetaMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. 
""" def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DetaMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(self.n_heads, 1, 1, 2) .repeat(1, self.n_levels, self.n_points, 1) ) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, 
n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") # PyTorch implementation (for now) output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiheadAttention with DeformableDetr->Deta,Deformable DETR->DETA class DetaMultiheadAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # get queries, keys and values query_states = self.q_proj(hidden_states) * self.scaling key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) # 
expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class DetaEncoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.encoder_n_points, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. 
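        # In the encoder, queries, keys and values all come from the same flattened multi-scale feature map, which is
        # why `hidden_states` is passed both as the query input and as the (encoder) key/value input below.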
hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class DetaDecoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model # self-attention self.self_attn = DetaMultiheadAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # cross-attention self.encoder_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) # feedforward neural networks self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
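
        Returns:
            A tuple whose first element is the updated hidden states of shape `(batch, seq_len, embed_dim)`. When
            `output_attentions=True`, the self-attention and cross-attention weights are appended to the tuple.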
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead class DetaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrPreTrainedModel with DeformableDetrConvEncoder->DetaBackboneWithPositionalEncodings,DeformableDetr->Deta class DetaPreTrainedModel(PreTrainedModel): config_class = DetaConfig base_model_prefix = "model" main_input_name = "pixel_values" _no_split_modules = [r"DetaBackboneWithPositionalEncodings", r"DetaEncoderLayer", r"DetaDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, DetaLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, DetaMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, 
"reference_points") and not self.config.two_stage: nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, "level_embed"): nn.init.normal_(module.level_embed) DETA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DetaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DETA_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetr->Deta class DetaEncoder(DetaPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DetaEncoderLayer`]. 
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
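
        Returns:
            A [`BaseModelOutput`] or, when `return_dict=False`, a plain tuple containing the last hidden state and,
            optionally, all hidden states and attention weights.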
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoder with DeformableDetr->Deta,Deformable DETR->DETA class DetaDecoder(DetaPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetaDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some tweaks for Deformable DETR: - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. - it also returns a stack of intermediate outputs and reference points from all decoding layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). 
position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of the feature maps. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): Indexes for the start of each feature level. In range `[0, sequence_length]`. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () for idx, decoder_layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = ( reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] ) else: if reference_points.shape[-1] != 2: raise ValueError("Reference points' last dimension must be of size 2") reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states) if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() else: if reference_points.shape[-1] != 2: raise ValueError( f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" ) new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) new_reference_points = 
new_reference_points.sigmoid() reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return DetaDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The bare DETA Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, DETA_START_DOCSTRING, ) class DetaModel(DetaPreTrainedModel): def __init__(self, config: DetaConfig): super().__init__(config) if config.two_stage: requires_backends(self, ["torchvision"]) # Create backbone with positional encoding self.backbone = DetaBackboneWithPositionalEncodings(config) intermediate_channel_sizes = self.backbone.intermediate_channel_sizes # Create input projection layers if config.num_feature_levels > 1: num_backbone_outs = len(intermediate_channel_sizes) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = intermediate_channel_sizes[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj = nn.ModuleList(input_proj_list) else: self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ] ) if not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) self.encoder = DetaEncoder(config) self.decoder = DetaDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model) self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) self.pix_trans = nn.Linear(config.d_model, config.d_model) self.pix_trans_norm = nn.LayerNorm(config.d_model) else: self.reference_points = nn.Linear(config.d_model, 2) self.assign_first_stage = config.assign_first_stage self.two_stage_num_proposals = config.two_stage_num_proposals self.post_init() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_decoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in 
self.backbone.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.model.named_parameters(): param.requires_grad_(True) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_valid_ratio def get_valid_ratio(self, mask): """Get the valid ratio of all feature maps.""" _, height, width = mask.shape valid_height = torch.sum(mask[:, :, 0], 1) valid_width = torch.sum(mask[:, 0, :], 1) valid_ratio_heigth = valid_height.float() / height valid_ratio_width = valid_width.float() / width valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) return valid_ratio # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_proposal_pos_embed def get_proposal_pos_embed(self, proposals): """Get the position embedding of the proposals.""" num_pos_feats = self.config.d_model // 2 temperature = 10000 scale = 2 * math.pi dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device) dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) # batch_size, num_queries, 4 proposals = proposals.sigmoid() * scale # batch_size, num_queries, 4, 128 pos = proposals[:, :, :, None] / dim_t # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512 pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) return pos def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): """Generate the encoder output proposals from encoded enc_output. Args: enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder. padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`. spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps. Returns: `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to directly predict a bounding box. (without the need of a decoder) - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse sigmoid. 
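                - level_ids (Tensor[sequence_length]): Feature level index of each proposal, later used to run
                  non-maximum suppression per level when `config.assign_first_stage=True`.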
""" batch_size = enc_output.shape[0] proposals = [] _cur = 0 level_ids = [] for level, (height, width) in enumerate(spatial_shapes): mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1) valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) grid_y, grid_x = meshgrid( torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing="ij", ) grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) proposals.append(proposal) _cur += height * width level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level) output_proposals = torch.cat(proposals, 1) output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) # assign each pixel as an object query object_query = enc_output object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) object_query = object_query.masked_fill(~output_proposals_valid, float(0)) object_query = self.enc_output_norm(self.enc_output(object_query)) level_ids = torch.cat(level_ids) return object_query, output_proposals, level_ids @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], DetaModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365") >>> model = DetaModel.from_pretrained("jozhang97/deta-swin-large-o365", two_stage=False) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 900, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: 
pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # which is a list of tuples features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] masks = [] for level, (source, mask) in enumerate(features): sources.append(self.input_proj[level](source)) masks.append(mask) if mask is None: raise ValueError("No attention mask was provided") # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj[level](features[-1][0]) else: source = self.input_proj[level](sources[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) sources.append(source) masks.append(mask) position_embeddings_list.append(pos_l) # Create queries query_embeds = None if not self.config.two_stage: query_embeds = self.query_position_embeddings.weight # Prepare encoder inputs (by flattening) spatial_shapes = [(source.shape[2:]) for source in sources] source_flatten = [source.flatten(2).transpose(1, 2) for source in sources] mask_flatten = [mask.flatten(1) for mask in masks] lvl_pos_embed_flatten = [] for level, pos_embed in enumerate(position_embeddings_list): pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, prepare decoder inputs batch_size, _, num_channels = encoder_outputs[0].shape enc_outputs_class = None enc_outputs_coord_logits = None output_proposals = None if self.config.two_stage: object_query_embedding, output_proposals, level_ids = self.gen_encoder_output_proposals( 
encoder_outputs[0], ~mask_flatten, spatial_shapes ) # hack implementation for two-stage DETA # apply a detection head to each pixel (A.4 in paper) # linear projection for bounding box binary classification (i.e. foreground and background) enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals # only keep top scoring `config.two_stage_num_proposals` proposals topk = self.two_stage_num_proposals proposal_logit = enc_outputs_class[..., 0] if self.assign_first_stage: proposal_boxes = center_to_corners_format(enc_outputs_coord_logits.sigmoid().float()).clamp(0, 1) topk_proposals = [] for b in range(batch_size): prop_boxes_b = proposal_boxes[b] prop_logits_b = proposal_logit[b] # pre-nms per-level topk pre_nms_topk = 1000 pre_nms_inds = [] for lvl in range(len(spatial_shapes)): lvl_mask = level_ids == lvl pre_nms_inds.append(torch.topk(prop_logits_b.sigmoid() * lvl_mask, pre_nms_topk)[1]) pre_nms_inds = torch.cat(pre_nms_inds) # nms on topk indices post_nms_inds = batched_nms( prop_boxes_b[pre_nms_inds], prop_logits_b[pre_nms_inds], level_ids[pre_nms_inds], 0.9 ) keep_inds = pre_nms_inds[post_nms_inds] if len(keep_inds) < self.two_stage_num_proposals: print( f"[WARNING] nms proposals ({len(keep_inds)}) < {self.two_stage_num_proposals}, running" " naive topk" ) keep_inds = torch.topk(proposal_logit[b], topk)[1] # keep top Q/L indices for L levels q_per_l = topk // len(spatial_shapes) is_level_ordered = ( level_ids[keep_inds][None] == torch.arange(len(spatial_shapes), device=level_ids.device)[:, None] ) keep_inds_mask = is_level_ordered & (is_level_ordered.cumsum(1) <= q_per_l) # LS keep_inds_mask = keep_inds_mask.any(0) # S # pad to Q indices (might let ones filtered from pre-nms sneak by... 
unlikely because we pick high conf anyways) if keep_inds_mask.sum() < topk: num_to_add = topk - keep_inds_mask.sum() pad_inds = (~keep_inds_mask).nonzero()[:num_to_add] keep_inds_mask[pad_inds] = True keep_inds_topk = keep_inds[keep_inds_mask] topk_proposals.append(keep_inds_topk) topk_proposals = torch.stack(topk_proposals) else: topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_logits = torch.gather( enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) ) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) else: query_embed, target = torch.split(query_embeds, num_channels, dim=1) query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) target = target.unsqueeze(0).expand(batch_size, -1, -1) reference_points = self.reference_points(query_embed).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder( inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs return tuple_outputs return DetaModelOutput( init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, output_proposals=output_proposals, ) @add_start_docstrings( """ DETA Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. 
""", DETA_START_DOCSTRING, ) class DetaForObjectDetection(DetaPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _tied_weights_keys = [r"bbox_embed\.\d+"] # We can't initialize the model on meta device as some weights are modified during the initialization _no_split_modules = None # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrForObjectDetection.__init__ with DeformableDetr->Deta def __init__(self, config: DetaConfig): super().__init__(config) # Deformable DETR encoder-decoder model self.model = DetaModel(config) # Detection heads on top self.class_embed = nn.Linear(config.d_model, config.num_labels) self.bbox_embed = DetaMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) # if two-stage, the last class_embed and bbox_embed is for region proposal generation num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) # hack implementation for iterative bounding box refinement self.model.decoder.bbox_embed = self.bbox_embed else: nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) self.model.decoder.bbox_embed = None if config.two_stage: # hack implementation for two-stage self.model.decoder.class_embed = self.class_embed for box_embed in self.bbox_embed: nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) # Initialize weights and apply final processing self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. aux_loss = [ {"logits": logits, "pred_boxes": pred_boxes} for logits, pred_boxes in zip(outputs_class.transpose(0, 1)[:-1], outputs_coord.transpose(0, 1)[:-1]) ] return aux_loss @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], DetaObjectDetectionOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). 
The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large") >>> model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected cat with confidence 0.683 at location [345.85, 23.68, 639.86, 372.83] Detected cat with confidence 0.683 at location [8.8, 52.49, 316.93, 473.45] Detected remote with confidence 0.568 at location [40.02, 73.75, 175.96, 117.33] Detected remote with confidence 0.546 at location [333.68, 77.13, 370.12, 187.51] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] init_reference = outputs.init_reference_points if return_dict else outputs[0] inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] # class logits + predicted bounding boxes outputs_classes = [] outputs_coords = [] for level in range(hidden_states.shape[1]): if level == 0: reference = init_reference else: reference = inter_references[:, level - 1] reference = inverse_sigmoid(reference) outputs_class = self.class_embed[level](hidden_states[:, level]) delta_bbox = self.bbox_embed[level](hidden_states[:, level]) if reference.shape[-1] == 4: outputs_coord_logits = delta_bbox + reference elif reference.shape[-1] == 2: delta_bbox[..., :2] += reference outputs_coord_logits = delta_bbox else: raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") outputs_coord = outputs_coord_logits.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) # Keep batch_size as first dimension outputs_class = torch.stack(outputs_classes, dim=1) outputs_coord = torch.stack(outputs_coords, dim=1) logits = outputs_class[:, -1] pred_boxes = outputs_coord[:, -1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = DetaHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, 
giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = DetaLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, num_queries=self.config.num_queries, assign_first_stage=self.config.assign_first_stage, assign_second_stage=self.config.assign_second_stage, ) criterion.to(logits.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["init_reference"] = init_reference if self.config.auxiliary_loss: auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs if self.config.two_stage: enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() outputs_loss["enc_outputs"] = { "logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord, "anchors": outputs.output_proposals.sigmoid(), } loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) aux_weight_dict.update({k + "_enc": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output return tuple_outputs dict_outputs = DetaObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, output_proposals=outputs.output_proposals, ) return dict_outputs # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). 
""" inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DetaLoss(nn.Module): """ This class computes the losses for `DetaForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervised class and box). Args: matcher (`DetaHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. 
""" def __init__( self, matcher, num_classes, focal_alpha, losses, num_queries, assign_first_stage=False, assign_second_stage=False, ): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses self.assign_first_stage = assign_first_stage self.assign_second_stage = assign_second_stage if self.assign_first_stage: self.stg1_assigner = DetaStage1Assigner() if self.assign_second_stage: self.stg2_assigner = DetaStage2Assigner(num_queries) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k not in ("auxiliary_outputs", "enc_outputs")} # Retrieve the matching between the outputs of the last layer and the targets if self.assign_second_stage: indices = self.stg2_assigner(outputs_without_aux, targets) else: indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # Check that we have initialized the distributed state world_size = 1 if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): if not self.assign_second_stage: indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) if "enc_outputs" in outputs: enc_outputs = outputs["enc_outputs"] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt["class_labels"] = torch.zeros_like(bt["class_labels"]) if self.assign_first_stage: indices = self.stg1_assigner(enc_outputs, bin_targets) else: indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes) l_dict = {k + "_enc": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead class DetaMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->Deta class DetaHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. 
Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. 
Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/layers/wrappers.py#L100 def nonzero_tuple(x): """ A 'as_tuple=True' version of torch.nonzero to support torchscript. because of https://github.com/pytorch/pytorch/issues/38718 """ if torch.jit.is_scripting(): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) else: return x.nonzero(as_tuple=True) # from https://github.com/facebookresearch/detectron2/blob/9921a2caa585d4fa66c4b534b6fab6e74d89b582/detectron2/modeling/matcher.py#L9 class DetaMatcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False): """ Args: thresholds (`list[float]`): A list of thresholds used to stratify predictions into levels. labels (`list[int`): A list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. allow_low_quality_matches (`bool`, *optional*, defaults to `False`): If `True`, produce additional matches for predictions with maximum match quality lower than high_threshold. See `set_low_quality_matches_` for more details. For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. 
""" # Add -inf and +inf to first and last position in thresholds thresholds = thresholds[:] if thresholds[0] < 0: raise ValueError("Thresholds should be positive") thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) # Currently torchscript does not support all + generator if not all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])): raise ValueError("Thresholds should be sorted.") if not all(l in [-1, 0, 1] for l in labels): raise ValueError("All labels should be either -1, 0 or 1") if len(labels) != len(thresholds) - 1: raise ValueError("Number of labels should be equal to number of thresholds - 1") self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in `set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates whether a prediction is a true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of :paper:`Faster R-CNN`. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. # Note that the matches qualities must be positive due to the use of # `torch.nonzero`. _, pred_inds_with_highest_quality = nonzero_tuple(match_quality_matrix == highest_quality_foreach_gt[:, None]) # If an anchor was labeled positive only due to a low-quality match # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B. 
# This follows the implementation in Detectron, and is found to have no significant impact. match_labels[pred_inds_with_highest_quality] = 1 # from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/sampling.py#L9 def subsample_labels(labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int): """ Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of positives & negatives. It will try to return as many positives as possible without exceeding `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives. Args: labels (Tensor): (N, ) label vector with values: * -1: ignore * bg_label: background ("negative") class * otherwise: one or more foreground ("positive") classes num_samples (int): The total number of labels with value >= 0 to return. Values that are not sampled will be filled with -1 (ignore). positive_fraction (float): The number of subsampled labels with values > 0 is `min(num_positives, int(positive_fraction * num_samples))`. The number of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. In order words, if there are not enough positives, the sample is filled with negatives. If there are also not enough negatives, then as many elements are sampled as is possible. bg_label (int): label index of background ("negative") class. Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. """ positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] negative = nonzero_tuple(labels == bg_label)[0] num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def sample_topk_per_gt(pr_inds, gt_inds, iou, k): if len(gt_inds) == 0: return pr_inds, gt_inds # find topk matches for each gt gt_inds2, counts = gt_inds.unique(return_counts=True) scores, pr_inds2 = iou[gt_inds2].topk(k, dim=1) gt_inds2 = gt_inds2[:, None].repeat(1, k) # filter to as many matches that gt has pr_inds3 = torch.cat([pr[:c] for c, pr in zip(counts, pr_inds2)]) gt_inds3 = torch.cat([gt[:c] for c, gt in zip(counts, gt_inds2)]) return pr_inds3, gt_inds3 # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/roi_heads/roi_heads.py#L123 class DetaStage2Assigner(nn.Module): def __init__(self, num_queries, max_k=4): super().__init__() self.positive_fraction = 0.25 self.bg_label = 400 # number > 91 to filter out later self.batch_size_per_image = num_queries self.proposal_matcher = DetaMatcher(thresholds=[0.6], labels=[0, 1], allow_low_quality_matches=True) self.k = max_k def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor): """ Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification labels. Args: matched_idxs (Tensor): a vector of length N, each is the best-matched gt index in [0, M) for each proposal. 
matched_labels (Tensor): a vector of length N, the matcher's label (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. gt_classes (Tensor): a vector of length M. Returns: Tensor: a vector of indices of sampled proposals. Each is in [0, N). Tensor: a vector of the same length, the classification label for each sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the background (num_classes). """ has_gt = gt_classes.numel() > 0 # Get the corresponding GT for each proposal if has_gt: gt_classes = gt_classes[matched_idxs] # Label unmatched proposals (0 label from matcher) as background (label=num_classes) gt_classes[matched_labels == 0] = self.bg_label # Label ignore proposals (-1 label) gt_classes[matched_labels == -1] = -1 else: gt_classes = torch.zeros_like(matched_idxs) + self.bg_label sampled_fg_idxs, sampled_bg_idxs = subsample_labels( gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label ) sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) return sampled_idxs, gt_classes[sampled_idxs] def forward(self, outputs, targets, return_cost_matrix=False): # COCO categories are from 1 to 90. They set num_classes=91 and apply sigmoid. bs = len(targets) indices = [] ious = [] for b in range(bs): iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(outputs["init_reference"][b].detach()), ) matched_idxs, matched_labels = self.proposal_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.6, 0 ow] ( sampled_idxs, sampled_gt_classes, ) = self._sample_proposals( # list of sampled proposal_ids, sampled_id -> [0, num_classes)+[bg_label] matched_idxs, matched_labels, targets[b]["class_labels"] ) pos_pr_inds = sampled_idxs[sampled_gt_classes != self.bg_label] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) indices.append((pos_pr_inds, pos_gt_inds)) ious.append(iou) if return_cost_matrix: return indices, ious return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k) # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/proposal_generator/rpn.py#L181 class DetaStage1Assigner(nn.Module): def __init__(self, t_low=0.3, t_high=0.7, max_k=4): super().__init__() self.positive_fraction = 0.5 self.batch_size_per_image = 256 self.k = max_k self.t_low = t_low self.t_high = t_high self.anchor_matcher = DetaMatcher( thresholds=[t_low, t_high], labels=[0, -1, 1], allow_low_quality_matches=True ) def _subsample_labels(self, label): """ Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value (-1) for all elements that are not included in the sample. Args: labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. 
""" pos_idx, neg_idx = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0) # Fill with the ignore label (-1), then set positive and negative labels label.fill_(-1) label.scatter_(0, pos_idx, 1) label.scatter_(0, neg_idx, 0) return label def forward(self, outputs, targets): bs = len(targets) indices = [] for b in range(bs): anchors = outputs["anchors"][b] if len(targets[b]["boxes"]) == 0: indices.append( ( torch.tensor([], dtype=torch.long, device=anchors.device), torch.tensor([], dtype=torch.long, device=anchors.device), ) ) continue iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(anchors), ) matched_idxs, matched_labels = self.anchor_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.7, 0 if iou < 0.3, -1 ow] matched_labels = self._subsample_labels(matched_labels) all_pr_inds = torch.arange(len(anchors), device=matched_labels.device) pos_pr_inds = all_pr_inds[matched_labels == 1] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) pos_pr_inds, pos_gt_inds = pos_pr_inds.to(anchors.device), pos_gt_inds.to(anchors.device) indices.append((pos_pr_inds, pos_gt_inds)) return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/biogpt/configuration_biogpt.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BioGPT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class BioGptConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate an BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BioGPT [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 42384): Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BioGptModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if `config.is_decoder=True`. layerdrop (`float`, *optional*, defaults to 0.0): Please refer to the paper about LayerDrop: https://arxiv.org/abs/1909.11556 for further details activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. Example: ```python >>> from transformers import BioGptModel, BioGptConfig >>> # Initializing a BioGPT microsoft/biogpt style configuration >>> configuration = BioGptConfig() >>> # Initializing a model from the microsoft/biogpt style configuration >>> model = BioGptModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "biogpt" def __init__( self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.scale_embedding = scale_embedding self.use_cache = use_cache self.layerdrop = layerdrop self.activation_dropout = activation_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
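

# A small usage sketch (not part of the original file): the defaults documented above can be overridden at
# construction time, e.g. for a hypothetical smaller variant used in quick experiments.
#
#   >>> from transformers import BioGptConfig, BioGptForCausalLM
#   >>> config = BioGptConfig(num_hidden_layers=6, hidden_size=512, num_attention_heads=8, intermediate_size=2048)
#   >>> model = BioGptForCausalLM(config)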
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() json_indent = 2 # modified from https://github.com/facebookresearch/fairseq/blob/dd74992d0d143155998e9ed4076826bcea80fb06/fairseq/data/dictionary.py#L18 class Dictionary: """A mapping from symbols to consecutive integers""" def __init__( self, *, # begin keyword-only arguments bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ): self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def __contains__(self, sym): return sym in self.indices @classmethod def load(cls, f): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ d = cls() d.add_from_file(f) return d def add_symbol(self, word, n=1, overwrite=False): """Adds a word to the dictionary""" if word in self.indices and not overwrite: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def _load_meta(self, lines): return 0 def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(f, "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f)) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: line, field = line.rstrip().rsplit(" ", 1) if field == "#fairseq:overwrite": overwrite = True line, field = line.rsplit(" ", 1) else: overwrite = False count = int(field) word = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(word) ) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'") def rewrite_dict_keys(d): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()) keep_keys = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del d2[f"{k}</w>"] d2[k] = d[k] # restore return d2 def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path): # prep if not os.path.exists(biogpt_checkpoint_path): raise ValueError(f"path {biogpt_checkpoint_path} does not exist!") os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f"Writing results to {pytorch_dump_folder_path}") # handle various types of models checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt") if not os.path.isfile(checkpoint_file): raise ValueError(f"path to the file {checkpoint_file} does not exist!") chkpt = torch.load(checkpoint_file, map_location="cpu") args = chkpt["cfg"]["model"] # dicts dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt") if not os.path.isfile(dict_file): raise ValueError(f"path to the file {dict_file} does not exist!") src_dict = Dictionary.load(dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"]) print(f"Generating {src_vocab_file} of {src_vocab_size} records") with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) # merges_file (bpecodes) bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes") if not os.path.isfile(bpecodes_file): raise ValueError(f"path to the file {bpecodes_file} does not exist!") merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(bpecodes_file, merges_file) # model config biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json") model_conf = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1e-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(f"Generating {biogpt_model_config_file}") with open(biogpt_model_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) # tokenizer config biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = { "bos_token": "<s>", "eos_token": "</s>", 
"model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(f"Generating {biogpt_tokenizer_config_file}") with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) # model model_state_dict = chkpt["model"] # remove unneeded keys ignore_keys = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(k, None) layer_names = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name) else: model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name) config = BioGptConfig.from_pretrained(pytorch_dump_folder_path) model_new = BioGptForCausalLM(config) # check that it loads ok model_new.load_state_dict(model_state_dict) # save pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f"Generating {pytorch_weights_dump_path}") torch.save(model_state_dict, pytorch_weights_dump_path) print("Conversion is done!") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/biogpt/tokenization_biogpt.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for BioGPT.""" import json import os from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/vocab.json", }, "merges_file": {"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/merges.txt"}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/biogpt": 1024, } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class BioGptTokenizer(PreTrainedTokenizer): """ Construct an FAIRSEQ Transformer tokenizer. Moses tokenization followed by Byte-Pair Encoding. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", **kwargs, ): try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use BioGptTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.lang = "en" self.sm = sacremoses # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.cache_moses_detokenizer = {} """ Initialisation""" with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, **kwargs, ) @property def vocab_size(self): """Returns vocab size""" return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer return self.cache_moses_tokenizer[lang].tokenize( text, aggressive_dash_splits=True, return_str=False, escape=True ) def moses_detokenize(self, tokens, lang): if lang not in self.cache_moses_detokenizer: moses_detokenizer = self.sm.MosesDetokenizer(lang=lang) self.cache_moses_detokenizer[lang] = moses_detokenizer return self.cache_moses_detokenizer[lang].detokenize(tokens) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text, bypass_tokenizer=False): """Returns a tokenized string.""" if bypass_tokenizer: text = text.split() else: text = self.moses_tokenize(text, self.lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # remove BPE tokens = [t.replace(" ", 
"").replace("</w>", " ") for t in tokens] tokens = "".join(tokens).split() # detokenize text = self.moses_detokenize(tokens, self.lang) return text def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BioGPT sequence has the following format: - single sequence: `</s> X ` - pair of sequences: `</s> A </s> B ` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.sep_token_id] + token_ids_0 sep = [self.sep_token_id] return sep + token_ids_0 + sep + token_ids_1 def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # no bos used in fairseq if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) return [1] + ([0] * len(token_ids_0)) def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ Transformer sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
""" sep = [self.sep_token_id] # no bos used in fairseq if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/biogpt/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"], "tokenization_biogpt": ["BioGptTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_biogpt"] = [ "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "BioGptForCausalLM", "BioGptForTokenClassification", "BioGptForSequenceClassification", "BioGptModel", "BioGptPreTrainedModel", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
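For illustration only: the `_LazyModule` wiring above means the public BioGPT names are importable from the top-level package, and the torch-dependent modeling code is only loaded on first access. A small sketch of that behaviour:

```python
# Sketch: names exported through the lazy module above resolve on first access.
import transformers

config = transformers.BioGptConfig()            # loads configuration_biogpt
tokenizer_cls = transformers.BioGptTokenizer    # loads tokenization_biogpt
model_cls = transformers.BioGptForCausalLM      # loads modeling_biogpt (requires torch)

print(config.model_type)  # "biogpt"
```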
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/biogpt/modeling_biogpt.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BioGPT model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_biogpt import BioGptConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/biogpt" _CONFIG_FOR_DOC = "BioGptConfig" BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/biogpt", "microsoft/BioGPT-Large", # See all BioGPT models at https://huggingface.co/models?filter=biogpt ] # Copied from transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding with OPT->BioGpt class BioGptLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # BioGpt is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = attention_mask.long() # create positions depending on attention_mask positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().forward(positions + self.offset) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BioGpt class BioGptAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[BioGptConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class BioGptDecoderLayer(nn.Module): def __init__(self, config: BioGptConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = BioGptAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_probs_dropout_prob, is_decoder=True, ) self.dropout = config.hidden_dropout_prob self.activation_fn = ACT2FN[config.hidden_act] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class BioGptPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BioGptConfig base_model_prefix = "biogpt" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BIOGPT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~BioGptConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIOGPT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare BioGPT Model transformer outputting raw hidden-states without any specific head on top.", BIOGPT_START_DOCSTRING, ) class BioGptModel(BioGptPreTrainedModel): def __init__(self, config: BioGptConfig): super().__init__(config) self.config = config self.layerdrop = config.layerdrop self.dropout = config.hidden_dropout_prob self.embed_dim = config.hidden_size self.padding_idx = config.pad_token_id self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, self.embed_dim, self.padding_idx) self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim) self.layers = nn.ModuleList([BioGptDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(self.embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise 
ValueError("You have to specify either input_ids or inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) * self.embed_scale if attention_mask is None: attention_mask = torch.ones( (inputs_embeds.shape[0], inputs_embeds.shape[1] + past_key_values_length), dtype=torch.bool, device=inputs_embeds.device, ) elif attention_mask.shape[1] != past_key_values_length + input_shape[1]: raise ValueError( f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" ) # embed positions positions = self.embed_positions(attention_mask, past_key_values_length) attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.layer_norm(hidden_states) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """BioGPT Model with a `language modeling` head on top for CLM fine-tuning.""", BIOGPT_START_DOCSTRING ) class BioGptForCausalLM(BioGptPreTrainedModel): _tied_weights_keys = ["output_projection.weight"] def __init__(self, config): super().__init__(config) self.biogpt = BioGptModel(config) self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def 
get_output_embeddings(self): return self.output_projection def set_output_embeddings(self, new_embeddings): self.output_projection = new_embeddings @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.biogpt( input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.output_projection(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, attention_mask, inputs_embeds=None, past_key_values=None, **kwargs ): # only last tokens for inputs_ids if past is defined in kwargs if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in 
layer_past), ) return reordered_past @add_start_docstrings( """ BioGPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BIOGPT_START_DOCSTRING, ) class BioGptForTokenClassification(BioGptPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.biogpt = BioGptModel(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout else: classifier_dropout = config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.biogpt( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The BioGpt Model transformer with a sequence classification head on top (linear layer). [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it is required to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, BIOGPT_START_DOCSTRING, ) class BioGptForSequenceClassification(BioGptPreTrainedModel): def __init__(self, config: BioGptConfig): super().__init__(config) self.num_labels = config.num_labels self.biogpt = BioGptModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.biogpt( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None: sequence_length = -1 else: if input_ids is not None: sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_length = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def get_input_embeddings(self): return self.biogpt.embed_tokens def set_input_embeddings(self, value): self.biogpt.embed_tokens = value
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/configuration_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conditional DETR model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class ConditionalDetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Conditional DETR [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of convolutional backbone to use in case `use_timm_backbone` = `True`. Supports any convolutional backbone from the timm package. For a list of all available models, see [this page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model). use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. Only supported when `use_timm_backbone` = `True`. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. 
Examples: ```python >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration >>> configuration = ConditionalDetrConfig() >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration >>> model = ConditionalDetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "conditional_detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs, ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.cls_loss_coefficient = cls_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: 
return self.d_model class ConditionalDetrOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12
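The configuration and ONNX spec above can be exercised with a short sketch. This is a minimal example, assuming a `transformers` installation that exposes `ConditionalDetrConfig` at the top level and `ConditionalDetrOnnxConfig` from the module above; it only checks the attribute aliases and the declared ONNX inputs, and downloads nothing.

```python
# Minimal sketch, assuming `transformers` is installed and ships the two
# classes defined above; nothing here loads weights or a backbone.
from transformers import ConditionalDetrConfig
from transformers.models.conditional_detr.configuration_conditional_detr import ConditionalDetrOnnxConfig

config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)

# `attribute_map` and the properties alias hidden_size / num_attention_heads
# to d_model / encoder_attention_heads.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8

# The ONNX config declares pixel_values and pixel_mask together with their
# dynamic axes, plus the opset and validation tolerance shown above.
onnx_config = ConditionalDetrOnnxConfig(config)
print(list(onnx_config.inputs.keys()))   # ['pixel_values', 'pixel_mask']
print(onnx_config.default_onnx_opset)    # 12
print(onnx_config.atol_for_validation)   # 1e-05
```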
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Conditional DETR.""" import warnings from ...image_transforms import rgb_to_id as _rgb_to_id from ...utils import logging from .image_processing_conditional_detr import ConditionalDetrImageProcessor logger = logging.get_logger(__name__) def rgb_to_id(x): warnings.warn( "rgb_to_id has moved and will not be importable from this module from v5. " "Please import from transformers.image_transforms instead.", FutureWarning, ) return _rgb_to_id(x) class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ConditionalDetrImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
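Since this file only provides deprecation shims, a small sketch (assuming Pillow is available and the installed `transformers` version still ships `ConditionalDetrFeatureExtractor`) can confirm that the class is a thin alias for the image processor and that constructing it emits the advertised `FutureWarning`:

```python
# Hedged sketch: the deprecated feature extractor is just an alias for the
# image processor, so existing code keeps working while emitting a warning.
import warnings

from transformers import ConditionalDetrImageProcessor
from transformers.models.conditional_detr.feature_extraction_conditional_detr import (
    ConditionalDetrFeatureExtractor,
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConditionalDetrFeatureExtractor()  # triggers the FutureWarning above

assert any(issubclass(w.category, FutureWarning) for w in caught)
assert isinstance(extractor, ConditionalDetrImageProcessor)  # alias, not a new class
```

New code should construct `ConditionalDetrImageProcessor` directly instead of relying on this alias.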
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Conditional DETR checkpoints.""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", 
f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query 
scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def rename_backbone_keys(state_dict): new_state_dict = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model") new_state_dict[new_key] = value else: new_state_dict[key] = value return new_state_dict def read_in_q_k_v(state_dict, is_panoptic=False): prefix = "" if is_panoptic: prefix = "conditional_detr." # first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure. 
""" # load default config config = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: config.backbone = "resnet101" if "dc5" in model_name: config.dilation = True is_panoptic = "panoptic" in model_name if is_panoptic: config.num_labels = 250 else: config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load image processor format = "coco_panoptic" if is_panoptic else "coco_detection" image_processor = ConditionalDetrImageProcessor(format=format) # prepare image img = prepare_img() encoding = image_processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] logger.info(f"Converting model {model_name}...") # load original model from torch hub conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval() state_dict = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: src = "conditional_detr." + src rename_key(state_dict, src, dest) state_dict = rename_backbone_keys(state_dict) # query, key and value matrices need special treatment read_in_q_k_v(state_dict, is_panoptic=is_panoptic) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them prefix = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr") and not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor") ): val = state_dict.pop(key) state_dict["conditional_detr.model" + key[4:]] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: val = state_dict.pop(key) state_dict["conditional_detr." 
+ key] = val elif key.startswith("bbox_attention") or key.startswith("mask_head"): continue else: val = state_dict.pop(key) state_dict[prefix + key] = val else: if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): val = state_dict.pop(key) state_dict[prefix + key] = val # finally, create HuggingFace model and load state dict model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model") # verify our conversion original_outputs = conditional_detr(pixel_values) outputs = model(pixel_values) assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4) assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4) # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) args = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
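For completeness, here is a hedged sketch of how the conversion entry point above might be driven. The CLI form mirrors the argparse block; calling the function directly is equivalent, but note that it pulls the original checkpoint via `torch.hub` and calls `push_to_hub`, so it needs network access and Hub credentials. The output folder name below is illustrative only.

```python
# Hedged sketch, not a definitive workflow. The equivalent CLI call from a
# source checkout of transformers would be:
#
#   python src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./converted-conditional-detr
#
# Direct call: downloads the original weights via torch.hub and pushes the
# converted model to the Hub, so it requires network access and credentials.
from transformers.models.conditional_detr.convert_conditional_detr_original_pytorch_checkpoint_to_pytorch import (
    convert_conditional_detr_checkpoint,
)

convert_conditional_detr_checkpoint(
    model_name="conditional_detr_resnet50",
    pytorch_dump_folder_path="./converted-conditional-detr",  # illustrative output path
)
```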
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/modeling_conditional_detr.py
# coding=utf-8 # Copyright 2022 Microsoft Research Asia and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Conditional DETR model.""" import math from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends, ) from ..auto import AutoBackbone from .configuration_conditional_detr import ConditionalDetrConfig if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model if is_vision_available(): from ...image_transforms import center_to_corners_format logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ConditionalDetrConfig" _CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/conditional-detr-resnet-50", # See all Conditional DETR models at https://huggingface.co/models?filter=conditional_detr ] @dataclass class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the Conditional DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ConditionalDetrModelOutput(Seq2SeqModelOutput): """ Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr class ConditionalDetrObjectDetectionOutput(ModelOutput): """ Output type of [`ConditionalDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr class ConditionalDetrSegmentationOutput(ModelOutput): """ Output type of [`ConditionalDetrForSegmentation`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries. 
See also [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None pred_masks: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->ConditionalDetr class ConditionalDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->ConditionalDetr def replace_batch_norm(model): r""" Recursively replace all `torch.nn.BatchNorm2d` with `ConditionalDetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device("meta"): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder class ConditionalDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(1, 2, 3, 4), in_chans=config.num_channels, **kwargs, ) else: backbone = AutoBackbone.from_config(config.backbone_config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr class ConditionalDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos class ConditionalDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->ConditionalDetr class ConditionalDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # function to generate sine positional embedding for 2d coordinates def gen_sine_position_embeddings(pos_tensor, d_model): scale = 2 * math.pi dim = d_model // 2 dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device) dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / dim) x_embed = pos_tensor[:, :, 0] * scale y_embed = pos_tensor[:, :, 1] * scale pos_x = x_embed[:, :, None] / dim_t pos_y = y_embed[:, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) pos = torch.cat((pos_y, pos_x), dim=2) return pos def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) 
return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor], **kwargs): position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings return tensor if object_queries is None else tensor + object_queries def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, spatial_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" position_embeddings = kwargs.pop("position_ebmeddings", None) key_value_position_embeddings = kwargs.pop("key_value_position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if key_value_position_embeddings is not None and spatial_position_embeddings is not None: raise ValueError( "Cannot specify both key_value_position_embeddings and spatial_position_embeddings. Please use just spatial_position_embeddings" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings if key_value_position_embeddings is not None: logger.warning_once( "key_value_position_embeddings has been deprecated and will be removed in v4.34. 
Please use spatial_position_embeddings instead" ) spatial_position_embeddings = key_value_position_embeddings # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if object_queries is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, object_queries) # add key-value position embeddings to the key value states if spatial_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class ConditionalDetrAttention(nn.Module): """ Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. 
The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be different to v. """ def __init__( self, embed_dim: int, out_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.out_dim = out_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) # head dimension of values self.v_head_dim = out_dim // num_heads if self.v_head_dim * num_heads != self.out_dim: raise ValueError( f"out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, key_states: Optional[torch.Tensor] = None, value_states: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, _ = hidden_states.size() # get query proj query_states = hidden_states * self.scaling # get key, value proj key_states = self._qk_shape(key_states, -1, batch_size) value_states = self._v_shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim) query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*v_proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.v_head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, self.out_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->ConditionalDetrEncoderLayer,DetrConfig->ConditionalDetrConfig class ConditionalDetrEncoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor = None, output_attentions: bool = False, **kwargs, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. 
Please use object_queries instead" ) object_queries = position_embeddings residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ConditionalDetrDecoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model d_model = config.d_model # Decoder Self-Attention projections self.sa_qcontent_proj = nn.Linear(d_model, d_model) self.sa_qpos_proj = nn.Linear(d_model, d_model) self.sa_kcontent_proj = nn.Linear(d_model, d_model) self.sa_kpos_proj = nn.Linear(d_model, d_model) self.sa_v_proj = nn.Linear(d_model, d_model) self.self_attn = ConditionalDetrAttention( embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # Decoder Cross-Attention projections self.ca_qcontent_proj = nn.Linear(d_model, d_model) self.ca_qpos_proj = nn.Linear(d_model, d_model) self.ca_kcontent_proj = nn.Linear(d_model, d_model) self.ca_kpos_proj = nn.Linear(d_model, d_model) self.ca_v_proj = nn.Linear(d_model, d_model) self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) self.encoder_attn = ConditionalDetrAttention( self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.nhead = config.decoder_attention_heads def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, is_first: Optional[bool] = False, **kwargs, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. 
object_queries (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings residual = hidden_states # ========== Begin of Self-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.sa_qcontent_proj( hidden_states ) # target is the input of the first decoder layer. zero by default. q_pos = self.sa_qpos_proj(query_position_embeddings) k_content = self.sa_kcontent_proj(hidden_states) k_pos = self.sa_kpos_proj(query_position_embeddings) v = self.sa_v_proj(hidden_states) _, num_queries, n_model = q_content.shape q = q_content + q_pos k = k_content + k_pos hidden_states, self_attn_weights = self.self_attn( hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) # ============ End of Self-Attention ============= hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # ========== Begin of Cross-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.ca_qcontent_proj(hidden_states) k_content = self.ca_kcontent_proj(encoder_hidden_states) v = self.ca_v_proj(encoder_hidden_states) batch_size, num_queries, n_model = q_content.shape _, source_len, _ = k_content.shape k_pos = self.ca_kpos_proj(object_queries) # For the first decoder layer, we concatenate the positional embedding predicted from # the object query (the positional embedding) into the original query (key) in DETR. 
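        # Note that only the first decoder layer keeps a `ca_qpos_proj`: `ConditionalDetrDecoder.__init__` sets it to
        # None for every later layer, so the projected query/key position embeddings are only *added* to the content
        # terms when `is_first` is True. For all layers, q and k are then split per head and *concatenated* with the
        # sine positional embeddings below, which doubles the channel dimension; this is why `self.encoder_attn`
        # was constructed with an input size of `self.embed_dim * 2`.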
if is_first: q_pos = self.ca_qpos_proj(query_position_embeddings) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead) k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead) k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # ============ End of Cross-Attention ============= # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr class ConditionalDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP class MLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr class ConditionalDetrPreTrainedModel(PreTrainedModel): config_class = ConditionalDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" _no_split_modules = [r"ConditionalDetrConvEncoder", r"ConditionalDetrEncoderLayer", r"ConditionalDetrDecoderLayer"] def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, ConditionalDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) elif isinstance(module, ConditionalDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() CONDITIONAL_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ConditionalDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONDITIONAL_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConditionalDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.detr.modeling_detr.DetrEncoder with Detr->ConditionalDetr,DETR->ConditionalDETR class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - object_queries are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: # we add object_queries as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) # in Conditional DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) d_model = config.d_model self.gradient_checkpointing = False # query_scale is the FFN applied on f to generate transformation T self.query_scale = MLP(d_model, d_model, d_model, 2) self.ref_point_head = MLP(d_model, d_model, 2, 2) for layer_id in range(config.decoder_layers - 1): self.layers[layer_id + 1].ca_qpos_proj = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): , *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. 
Please use object_queries instead" ) object_queries = position_embeddings output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None reference_points_before_sigmoid = self.ref_point_head( query_position_embeddings ) # [num_queries, batch_size, 2] reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) obj_center = reference_points[..., :2].transpose(0, 1) # get sine embedding for the query vector query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center, self.config.d_model) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if idx == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(hidden_states) # apply transformation query_sine_embed = query_sine_embed_before_transformation * pos_transformation if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, None, object_queries, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask, None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, query_sine_embed=query_sine_embed, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=(idx == 0), ) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: intermediate = torch.stack(intermediate) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points, ] if v is not None ) return ConditionalDetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, 
intermediate_hidden_states=intermediate, reference_points=reference_points, ) @add_start_docstrings( """ The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrModel(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = ConditionalDetrConvEncoder(config) object_queries = build_position_encoding(config) self.backbone = ConditionalDetrConvModel(backbone, object_queries) # Create projection layer self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = ConditionalDetrEncoder(config) self.decoder = ConditionalDetrDecoder(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # pixel_values should be of shape (batch_size, num_channels, height, width) # pixel_mask should be of 
shape (batch_size, height, width)
        features, object_queries_list = self.backbone(pixel_values, pixel_mask)

        # get final feature map and downsampled mask
        feature_map, mask = features[-1]

        if mask is None:
            raise ValueError("Backbone does not return downsampled pixel mask")

        # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
        projected_feature_map = self.input_projection(feature_map)

        # Third, flatten the feature map + object_queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC
        # In other words, turn their shape into (batch_size, sequence_length, hidden_size)
        flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
        object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)

        flattened_mask = mask.flatten(1)

        # Fourth, send flattened_features + flattened_mask + object_queries through the encoder
        # flattened_features is a Tensor of shape (batch_size, height*width, hidden_size)
        # flattened_mask is a Tensor of shape (batch_size, height*width)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs_embeds=flattened_features,
                attention_mask=flattened_mask,
                object_queries=object_queries,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # Fifth, send query embeddings + object_queries through the decoder (which is conditioned on the encoder output)
        query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
        queries = torch.zeros_like(query_position_embeddings)

        # decoder outputs consist of (dec_features, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            inputs_embeds=queries,
            attention_mask=None,
            object_queries=object_queries,
            query_position_embeddings=query_position_embeddings,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=flattened_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return ConditionalDetrModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            reference_points=decoder_outputs.reference_points,
        )


@add_start_docstrings(
    """
    CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
    top, for tasks such as COCO detection.
""", CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # CONDITIONAL DETR encoder-decoder model self.model = ConditionalDetrModel(config) # Object detection heads self.class_labels_classifier = nn.Linear( config.d_model, config.num_labels ) # We add one for the "no object" class self.bbox_predictor = ConditionalDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrObjectDetectionOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through CONDITIONAL_DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) reference = outputs.reference_points if return_dict else outputs[-1] reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) outputs_coords = [] hs = sequence_output tmp = self.bbox_predictor(hs) tmp[..., :2] += reference_before_sigmoid pred_boxes = tmp.sigmoid() # pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) for lvl in range(hs.shape[0]): tmp = self.bbox_predictor(hs[lvl]) tmp[..., :2] += reference_before_sigmoid outputs_coord = tmp.sigmoid() outputs_coords.append(outputs_coord) outputs_coord = torch.stack(outputs_coords) auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": self.config.cls_loss_coefficient, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # object detection model self.conditional_detr = ConditionalDetrForObjectDetection(config) # segmentation head hidden_size, number_of_heads = config.d_model, config.encoder_attention_heads intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = ConditionalDetrMaskHeadSmallConv( hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size ) self.bbox_attention = ConditionalDetrMHAttentionMap( hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrSegmentationOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. Returns: Examples: ```python >>> import io >>> import requests >>> from PIL import Image >>> import torch >>> import numpy >>> from transformers import ( ... AutoImageProcessor, ... ConditionalDetrConfig, ... ConditionalDetrForSegmentation, ... 
) >>> from transformers.image_transforms import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # randomly initialize all weights of the model >>> config = ConditionalDetrConfig() >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # Use the `post_process_panoptic_segmentation` method of the `image_processor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) # First, get list of feature maps and object_queries features, object_queries_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) feature_map, mask = features[-1] batch_size, num_channels, height, width = feature_map.shape projected_feature_map = self.conditional_detr.model.input_projection(feature_map) # Third, flatten the feature map + object_queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + object_queries through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.conditional_detr.model.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + object_queries through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat( batch_size, 1, 1 ) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.conditional_detr.model.decoder( 
inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Sixth, compute logits, pred_boxes and pred_masks logits = self.conditional_detr.class_labels_classifier(sequence_output) pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) # FIXME h_boxes takes the last one computed, keep this in mind # important: we need to reverse the mask, since in the original implementation the mask works reversed # bbox_mask is of shape (batch_size, num_queries, number_of_attention_heads in bbox_attention, height/32, width/32) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = seg_masks.view( batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] ) loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality", "masks"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["pred_masks"] = pred_masks if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient weight_dict["loss_mask"] = self.config.mask_loss_coefficient weight_dict["loss_dice"] = self.config.dice_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrSegmentationOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, 
decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


def _expand(tensor, length: int):
    return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)


# Copied from transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv with Detr->ConditionalDetr
class ConditionalDetrMaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm. Upsampling is done using an FPN approach.
    """

    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()

        if dim % 8 != 0:
            raise ValueError(
                "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in"
                " GroupNorm is set to 8"
            )

        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]

        self.lay1 = nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = nn.GroupNorm(8, dim)
        self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1])
        self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2])
        self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3])
        self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4])
        self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1)

        self.dim = dim

        self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
        # here we concatenate x, the projected feature map, of shape (batch_size, d_model, height/32, width/32) with
        # bbox_mask, the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32).
        # We expand the projected feature map to match the number of heads.
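        # Shape bookkeeping: `_expand` repeats its input along a new query axis and folds that axis into the batch
        # dimension, so after the concatenation below `x` has shape
        # (batch_size * num_queries, d_model + num_heads, height/32, width/32), matching the
        # `hidden_size + number_of_heads` input dimension this head is constructed with in
        # `ConditionalDetrForSegmentation.__init__`.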
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x # Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr class ConditionalDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. 
targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class ConditionalDetrLoss(nn.Module): """ This class computes the losses for ConditionalDetrForObjectDetection/ConditionalDetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`ConditionalDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.__init__ def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. 
""" logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_masks def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) # Copied from transformers.models.detr.modeling_detr.DetrLoss.forward def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->ConditionalDetr class ConditionalDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->ConditionalDetr class ConditionalDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. 
""" def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. 
Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
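# Illustrative usage sketch (not part of the library itself): `generalized_box_iou` expects
# corner-format (x0, y0, x1, y1) boxes and returns an [N, M] pairwise matrix. Under the assumed
# tensors below:
#
#     boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
#     boxes2 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 1.5, 1.5]])
#     giou = generalized_box_iou(boxes1, boxes2)  # shape (1, 2)
#
# giou[0, 0] is exactly 1.0 (identical boxes), while giou[0, 1] is smaller because the boxes only
# partially overlap and the area of their enclosing box is penalized.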
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"] _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_conditional_detr"] = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
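# Illustrative usage sketch (not part of this file): thanks to the lazy-module structure above,
# the torch/vision-backed classes are only imported when one of the exported names is actually
# accessed, e.g. (assuming torch and vision are installed):
#
#     from transformers import ConditionalDetrConfig, ConditionalDetrForObjectDetection
#
#     model = ConditionalDetrForObjectDetection(ConditionalDetrConfig())
#
# The names importable from this subpackage are the ones declared in `_import_structure`.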
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Conditional DETR.""" import io import pathlib from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, ) from ...utils import ( TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging, ) if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats logger = logging.get_logger(__name__) # pylint: disable=invalid-name SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. 
Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> List[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
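Example (illustrative): for an image of height 2 and width 3 padded to an `output_size` of `(3, 4)`, the
returned mask is a `(3, 4)` array whose top-left `2x3` block is 1 and whose remaining entries are 0.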
""" input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by ConditionalDetr. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] # Converting the filtered keypoints list to a numpy array keypoints = np.asarray(keypoints, dtype=np.float32) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: 
np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> Dict: """ Prepare a coco panoptic annotation for ConditionalDetr. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, 
final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] return area # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) scores, labels = scores.squeeze(-1), labels.squeeze(-1) return scores, labels # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation def post_process_panoptic_sample( out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85, ) -> Dict: """ Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single sample. Args: out_logits (`torch.Tensor`): The logits for this sample. masks (`torch.Tensor`): The predicted segmentation masks for this sample. boxes (`torch.Tensor`): The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y, width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). processed_size (`Tuple[int, int]`): The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size after data augmentation but before batching. target_size (`Tuple[int, int]`): The target size of the image, `(height, width)` corresponding to the requested final size of the prediction. is_thing_map (`Dict`): A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. threshold (`float`, *optional*, defaults to 0.85): The threshold used to binarize the segmentation masks. """ # we filter empty queries and detection below threshold scores, labels = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) b, h, w = cur_masks.shape # It may be that we have several predicted masks for the same stuff class. 
# In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for k, label in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) # We filter out any mask that is too small if cur_classes.size() > 0: # We know filter empty masks as long as we find some filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} for i, (cat, a) in enumerate(zip(cur_classes, area)) ] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} return predictions # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
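For example (illustrative), `mask = np.array([[0, 1], [1, 0]])` flattens to `[0, 1, 1, 0]`, so the function
returns `[2, 2]`: the foreground starts at (1-indexed) pixel 2 and runs for 2 pixels.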
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
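Example (illustrative): with `num_labels=91` and `object_mask_threshold=0.8`, every query whose predicted
label is the no-object class (91) or whose score is not above 0.8 is dropped, and the three tensors are
returned filtered down to the surviving queries.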
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrImageProcessor(BaseImageProcessor): r""" Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. 
do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: logger.warning_once( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into ConditionalDetr model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format, ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): logger.warning_once( "The `prepare` method is deprecated and will be removed in a v4.33. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->ConditionalDetr def prepare_coco_detection(self, *args, **kwargs): logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ") return prepare_coco_panoptic_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. 
If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or `height` and `width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if "max_size" in kwargs: logger.warning_once( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size( image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format ) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
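Example (illustrative): with a `rescale_factor` of `1 / 255`, pixel values in `[0, 255]` are mapped to
values in `[0, 1]`.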
""" return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotationFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. 
image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ if "pad_and_return_pixel_mask" in kwargs: logger.warning_once( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead." ) do_pad = kwargs.pop("pad_and_return_pixel_mask") max_size = None if "max_size" in kwargs: logger.warning_once( "The `max_size` argument is deprecated and will be removed in a future version, use" " `size['longest_edge']` instead." ) size = kwargs.pop("max_size") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if ( masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." ) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format, ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize( image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format ) resized_annotation = self.resize_annotation( target, orig_size, get_image_size(resized_image, input_data_format) ) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [ self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad( images, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format ) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs # POSTPROCESSING METHODS - TODO: add support for other frameworks def post_process(self, outputs, target_sizes): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the Pascal VOC format (xmin, ymin, xmax, ymax). Only supports PyTorch. 
        Args:
            outputs ([`ConditionalDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
                Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
                image size (before any data augmentation). For visualization, this should be the image size after data
                augmentation, but before padding.
        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        logger.warning_once(
            "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
            " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
        )

        out_logits, out_bbox = outputs.logits, outputs.pred_boxes

        if len(out_logits) != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
        if target_sizes.shape[1] != 2:
            raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")

        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]

        return results

    # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr
    def post_process_object_detection(
        self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
    ):
        """
        Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`ConditionalDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to `None`, predictions will not be resized.
            top_k (`int`, *optional*, defaults to 100):
                Keep only top k bounding boxes before filtering by thresholding.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
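
        Example (an illustrative sketch; the checkpoint id and the test image URL are assumptions used purely for
        demonstration):

        ```python
        >>> import torch
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        >>> model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # (height, width) of the original image, used to rescale the predicted boxes
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)
        >>> sorted(results[0].keys())
        ['boxes', 'labels', 'scores']
        ```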
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
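
# Illustrative sketch of how the panoptic post-processing above is typically driven end to end. This helper is not
# part of the upstream processor: `model` is assumed to be a `ConditionalDetrForSegmentation` instance and
# `image_processor` its matching image processor; the `label_ids_to_fuse` value is an arbitrary example.
def _example_panoptic_post_processing(model, image_processor, image):
    import torch

    # run the segmentation model on a single PIL image
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # convert the raw class logits / mask predictions into a panoptic map plus per-segment metadata
    panoptic = image_processor.post_process_panoptic_segmentation(
        outputs,
        threshold=0.5,
        label_ids_to_fuse={0},  # fuse all instances of class id 0 into a single segment
        target_sizes=[image.size[::-1]],  # resize the prediction to the original (height, width)
    )[0]

    # `segmentation` is a (height, width) tensor of segment ids; `segments_info` lists id / label_id / score per segment
    return panoptic["segmentation"], panoptic["segments_info"]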
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py
# coding=utf-8 # Copyright 2023 The BigCode team and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GPTBigCode configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class GPTBigCodeConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTBigCode [gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTBigCodeModel`]. n_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*, defaults to None): Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new", "gelu_pytorch_tanh"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_attn_weights (`bool`, *optional*, defaults to `True`): Scale attention weights by dividing by sqrt(hidden_size).. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
        attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to call the fused softmax in float32.
        scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to scale the attention softmax in float32.
        multi_query (`bool`, *optional*, defaults to `True`):
            Whether to use Multi-Query Attention (`True`) or Multi-Head Attention (`False`).

    Example:

    ```python
    >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel

    >>> # Initializing a GPTBigCode configuration
    >>> configuration = GPTBigCodeConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPTBigCodeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gpt_bigcode"

    keys_to_ignore_at_inference = ["past_key_values"]

    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
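
# Minimal sketch of how the `attribute_map` above behaves: the canonical names used across the library resolve to the
# GPT-2 style attributes stored on this config. The values shown simply restate the defaults from `__init__`.
#
#     config = GPTBigCodeConfig()
#     assert config.hidden_size == config.n_embd == 768
#     assert config.num_hidden_layers == config.n_layer == 12
#     assert config.num_attention_heads == config.n_head == 12
#     assert config.multi_query  # Multi-Query Attention is enabled by default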
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
# coding=utf-8 # Copyright 2023 The Bigcode team and HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GPTBigCode model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, ) from .configuration_gpt_bigcode import GPTBigCodeConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bigcode/gpt_bigcode-santacoder" _CONFIG_FOR_DOC = "GPTBigCodeConfig" GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bigcode/gpt_bigcode-santacoder", # See all GPTBigCode models at https://huggingface.co/models?filter=gpt_bigcode ] # Fused kernels # Use separate functions for each case because conditionals prevent kernel fusion. # TODO: Could have better fused kernels depending on scaling, dropout and head mask. # Is it doable without writing 32 functions? 
@torch.jit.script def upcast_masked_softmax( x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype ): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor): x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1) return x # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) class GPTBigCodeAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config self.mask_value = None self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.kv_heads = 1 if self.multi_query else self.num_heads self.kv_dim = self.kv_heads * self.head_dim self.split_size = self.embed_dim self.is_causal = True if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention self.layer_idx = layer_idx self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = ( config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32 ) self.attn_pdrop = config.attn_pdrop if self.is_cross_attention: if self.multi_query: raise NotImplementedError("Multi-Query Attention not supported for cross_attention") self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim) self.q_attn = nn.Linear(self.embed_dim, self.embed_dim) else: self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim) self.c_proj = nn.Linear(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) def _get_mask_value(self, device, dtype): # torch.where expects a tensor. We use a cache to avoid recreating it every time. 
if self.mask_value is None or self.mask_value.dtype != dtype or self.mask_value.device != device: self.mask_value = torch.full([], torch.finfo(dtype).min, dtype=dtype, device=device) return self.mask_value def _attn(self, query, key, value, attention_mask=None, head_mask=None): dtype = query.dtype softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else dtype upcast = dtype != softmax_dtype unscale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1 scale_factor = unscale**-1 if self.scale_attn_weights: scale_factor /= self.head_dim**0.5 # MQA models: (batch_size, query_length, num_heads * head_dim) # MHA models: (batch_size, num_heads, query_length, head_dim) query_shape = query.shape batch_size = query_shape[0] key_length = key.size(-1) if self.multi_query: # (batch_size, query_length, num_heads, head_dim) x (batch_size, head_dim, key_length) # -> (batch_size, query_length, num_heads, key_length) query_length = query_shape[1] attn_shape = (batch_size, query_length, self.num_heads, key_length) attn_view = (batch_size, query_length * self.num_heads, key_length) # No copy needed for MQA 2, or when layer_past is provided. query = query.reshape(batch_size, query_length * self.num_heads, self.head_dim) else: # (batch_size, num_heads, query_length, head_dim) x (batch_size, num_heads, head_dim, key_length) # -> (batch_size, num_heads, query_length, key_length) query_length = query_shape[2] attn_shape = (batch_size, self.num_heads, query_length, key_length) attn_view = (batch_size * self.num_heads, query_length, key_length) # Always copies query = query.reshape(batch_size * self.num_heads, query_length, self.head_dim) # No copy when layer_past is provided. key = key.reshape(batch_size * self.num_heads, self.head_dim, key_length) attn_weights = torch.empty(attn_view, device=query.device, dtype=query.dtype) if query.device.type == "cpu": # This is needed because of a bug in pytorch https://github.com/pytorch/pytorch/issues/80588. # The bug was fixed in https://github.com/pytorch/pytorch/pull/96086, # but the fix has not been released as of pytorch version 2.0.0. attn_weights = torch.zeros_like(attn_weights) beta = 1 else: beta = 0 attn_weights = torch.baddbmm(attn_weights, query, key, beta=beta, alpha=scale_factor).view(attn_shape) if upcast: # Use a fused kernel to prevent a large overhead from casting and scaling. # Sub-optimal when the key length is not a multiple of 8. if attention_mask is None: attn_weights = upcast_softmax(attn_weights, unscale, softmax_dtype) else: mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) attn_weights = upcast_masked_softmax(attn_weights, attention_mask, mask_value, unscale, softmax_dtype) else: if attention_mask is not None: mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) # The fused kernel is very slow when the key length is not a multiple of 8, so we skip fusion. 
attn_weights = torch.where(attention_mask, attn_weights, mask_value) attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: if self.multi_query: head_mask = head_mask.transpose(1, 2) attn_weights = attn_weights * head_mask if self.multi_query: attn_output = torch.bmm(attn_weights.view(attn_view), value).view(query_shape) else: attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def forward( self, hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." ) query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), # i.e., the memory layout is not the same as GPT2. # This makes the concatenation with past_key_value more efficient. query, key_value = ( self.c_attn(hidden_states) .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) .transpose(1, 2) .split((self.head_dim, 2 * self.head_dim), dim=3) ) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) attn_output, attn_weights = self._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) if not self.multi_query: attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) attn_weights = attn_weights.transpose(1, 2) outputs += (attn_weights,) return outputs # a, present, (attentions) class GPTBigCodeFlashAttention2(GPTBigCodeAttention): """ GPTBigCode flash attention module. This module inherits from `GPTBigCodeAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. 
Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." ) query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), # i.e., the memory layout is not the same as GPT2. # This makes the concatenation with past_key_value more efficient. query, key_value = ( self.c_attn(hidden_states) .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) .transpose(1, 2) .split((self.head_dim, 2 * self.head_dim), dim=3) ) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim if self.multi_query: batch_size, query_length, _ = query.shape query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim) key = key.unsqueeze(2) value = value.unsqueeze(2) else: query_length = query.shape[2] batch_size, _, tgt, _ = key.shape query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim) key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) attn_dropout = self.attn_pdrop if self.training else 0.0 softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else query.dtype upcast = query.dtype != softmax_dtype softmax_scale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1 softmax_scale = softmax_scale**-1 if self.scale_attn_weights: softmax_scale /= self.head_dim**0.5 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. 
input_dtype = query.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.c_attn.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attn_output = self._flash_attention_forward( query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=softmax_scale ) attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.c_proj(attn_weights_reshaped) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2) else: attn_weights_reshaped = None outputs += (attn_weights_reshaped,) return outputs # a, present, (attentions) # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) class GPTBigCodeSdpaAttention(GPTBigCodeAttention): def _attn(self, query, key, value, attention_mask=None, head_mask=None): if head_mask is not None: # The super dispatch is done in the forward. raise ValueError( "PyTorch SDPA does not support head_mask. Please open an issue in Transformers repository." ) scale = None if not self.scale_attn_weights: scale = 1 # MQA models: (batch_size, query_length, num_heads * head_dim) # MHA models: (batch_size, num_heads, query_length, head_dim) query_shape = query.shape batch_size = query_shape[0] key.shape[-2] if self.multi_query: query_length = query_shape[1] # SDPA requires the dimension [..., sequence_length, head_dim]. query = query.view(batch_size, query_length, self.num_heads, self.head_dim).transpose(1, 2) # Without these unsqueeze, SDPA complains as the query and key/value have a different number of dimensions. key = key.unsqueeze(1) value = value.unsqueeze(1) # Although these expand are not numerically useful, PyTorch 2.1 can not dispatch to memory-efficient backend # and flash attention backend (No available kernel. Aborting execution.) 
from the shapes # query = [batch_size, num_heads, query_length, head_dim] # key = [batch_size, 1, past_length, head_dim] # value = [batch_size, 1, past_length, head_dim] # # so we could do: # # key = key.expand(-1, self.num_heads, -1, -1) # value = value.expand(-1, self.num_heads, -1, -1) # # However SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # so we always dispatch to the math path: https://github.com/pytorch/pytorch/issues/112577. # Arguably we could still do expand + contiguous when `query.device.type == "cuda"` in order to dispatch on memory-efficient # backend, but it feels very hacky. else: query_length = query_shape[-1] # See the comment above. if query.device.type == "cuda" and attention_mask is not None: query = query.contiguous() key = key.contiguous() value = value.contiguous() sdpa_result = torch.nn.functional.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=self.attn_pdrop if self.training else 0.0, # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case query_length == 1. is_causal=self.is_causal and attention_mask is None and query_length > 1, scale=scale, ) if self.multi_query: # (batch_size, num_heads, seq_len, head_dim) --> (batch_size, seq_len, num_heads, head_dim) sdpa_result = sdpa_result.transpose(1, 2) # Reshape is kind of expensive here, as it does a memory copy, # but I did not manage to make away without it (logits do not match when using view) # (batch_size, seq_len, num_heads, head_dim) --> (batch_size, seq_len, num_heads * head_dim) sdpa_result = sdpa_result.reshape(query_shape) return sdpa_result, None def forward( self, hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." ) query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), # i.e., the memory layout is not the same as GPT2. # This makes the concatenation with past_key_value more efficient. 
query, key_value = ( self.c_attn(hidden_states) .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) .transpose(1, 2) .split((self.head_dim, 2 * self.head_dim), dim=3) ) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) if not output_attentions and head_mask is None: # Difference with the original implementation: there is no need to transpose the key here, # as SDPA expects seq_length to be at index -2 for the key as well attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) else: # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented. logger.warning_once( "GPTBigCodeModel is using GPTBigCodeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` and `head_mask` not None." ' Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) attn_output, attn_weights = super()._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) if not self.multi_query: attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) attn_weights = attn_weights.transpose(1, 2) outputs += (attn_weights,) return outputs class GPTBigCodeMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states GPTBIGCODE_ATTENTION_CLASSES = { "eager": GPTBigCodeAttention, "flash_attention_2": GPTBigCodeFlashAttention2, "sdpa": GPTBigCodeSdpaAttention, } class GPTBigCodeBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: if config.multi_query: raise NotImplementedError("Cross-attention not implemented for MQA") self.crossattention = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation]( config, is_cross_attention=True, layer_idx=layer_idx ) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigCodeMLP(self.inner_dim, config) def forward( self, hidden_states: Optional[Tuple[torch.Tensor]], layer_past: Optional[torch.Tensor] = 
None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[ Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor] ]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attn_output = cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs # hidden_states, present, (attentions, cross_attentions) class GPTBigCodePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTBigCodeConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPTBigCodeBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)): # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
# > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py module.c_proj.weight.data.normal_( mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) ) module.c_proj._is_hf_initialized = True elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPT_BIGCODE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GPTBigCodeConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT_BIGCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`Tuple[torch.Tensor]` of length `config.n_layers`): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for `past_key_values`. In other words, the `attention_mask` always has to have the length: `len(past_key_values) + len(input_ids)` [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
[What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare GPT_BIGCODE Model transformer outputting raw hidden-states without any specific head on top.", GPT_BIGCODE_START_DOCSTRING, ) class GPTBigCodeModel(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False ) self.gradient_checkpointing = False self._use_sdpa = config._attn_implementation == "sdpa" self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = 
None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0].size(-2) if attention_mask is not None and len(attention_mask.shape) == 2 and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_length > 0: position_ids = position_ids[:, past_length : input_shape[-1] + past_length :] elif position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # Self-attention mask. query_length = input_shape[-1] key_length = past_length + query_length self_attention_mask = self.bias[None, key_length - query_length : key_length, :key_length] if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask.bool() if (attention_mask is not None and 0 in attention_mask) else None encoder_attention_mask = ( encoder_attention_mask.bool() if (encoder_attention_mask is not None and 0 in encoder_attention_mask) else None ) else: # 4d mask is passed through the layers if attention_mask is not None: self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to( dtype=torch.bool, device=self_attention_mask.device ) # MQA models: (batch_size, query_length, n_heads, key_length) # MHA models: (batch_size, n_heads, query_length, key_length) self_attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1) if self._use_sdpa and head_mask is None and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. if self.multi_query: # gpt_bigcode using MQA has the bad taste to use a causal mask with shape # [batch_size, target_length, 1, source_length], not compatible with SDPA, hence this transpose. 
self_attention_mask = self_attention_mask.transpose(1, 2) if query_length > 1 and attention_mask is not None: # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213 self_attention_mask = AttentionMaskConverter._unmask_unattended( self_attention_mask, attention_mask, unmasked_value=True ) # SDPA with a custom mask is much faster in fp16/fp32 dtype rather than bool. Cast here to floating point instead of at every layer. dtype = self.wte.weight.dtype self_attention_mask = torch.where( self_attention_mask, torch.full([], 0.0, dtype=dtype, device=self_attention_mask.device), torch.full( [], torch.finfo(self.wte.weight.dtype).min, dtype=dtype, device=self_attention_mask.device ), ) attention_mask = self_attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if ( self.config.add_cross_attention and encoder_hidden_states is not None and encoder_attention_mask is not None ): if encoder_attention_mask.dim() == 2: encoder_attention_mask.unsqueeze(1) assert encoder_attention_mask.dim() == 3 encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) else: encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = [] if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache: presents.append(outputs[1]) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] 
if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT_BIGCODE_START_DOCSTRING, ) class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPTBigCodeModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # Omit tokens covered by past_key_values if past_key_values: if self.config.multi_query: past_length = past_key_values[0].shape[1] else: past_length = past_key_values[0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1] :] attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] else: position_ids = None # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } ) return model_inputs @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(shift_logits.device) # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ return tuple(layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values) @add_start_docstrings( """ The GPTBigCode Model transformer with a sequence classification head on top (linear layer). [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""", GPT_BIGCODE_START_DOCSTRING, ) class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ GPT_BIGCODE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, GPT_BIGCODE_START_DOCSTRING, ) class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device)) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/gpt_bigcode/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt_bigcode"] = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
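# --- Editor's sketch, not part of the upstream file ---
# Hedged illustration of the lazy-module wiring above: names listed in _import_structure
# resolve on first access, so importing the config is cheap and the torch-backed modeling
# classes are only loaded when touched. The small overrides below (n_layer/n_head/n_embd)
# assume the GPT-2-style parameter names used by GPTBigCodeConfig.
if __name__ == "__main__":
    from transformers.models.gpt_bigcode import GPTBigCodeConfig  # resolved lazily

    config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    # Touching the modeling class triggers the deferred import of modeling_gpt_bigcode.
    from transformers.models.gpt_bigcode import GPTBigCodeModel

    model = GPTBigCodeModel(config)
    print(config.n_layer, sum(p.numel() for p in model.parameters()))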
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/bit/modeling_bit.py
# coding=utf-8 # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BiT model. Also supports backbone for ViT hybrid.""" import collections import math from typing import Optional, Tuple import numpy as np import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_bit import BitConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "BitConfig" # Base docstring _CHECKPOINT_FOR_DOC = "google/bit-50" _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/bit-50" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" BIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bit-50", # See all BiT models at https://huggingface.co/models?filter=bit ] def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]: r""" Utility function to get the tuple padding value given the kernel_size and padding. Args: padding (Union[`str`, `int`], *optional*): Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from PyTorch is used. kernel_size (`int`, *optional*, defaults to 7): Kernel size of the convolution layers. stride (`int`, *optional*, defaults to 1): Stride value of the convolution layers. dilation (`int`, *optional*, defaults to 1): Dilation value of the convolution layers. """ dynamic = False if padding is None: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding, dynamic if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == "same": # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0: # static case, no extra overhead padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == "valid": # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding, dynamic class WeightStandardizedConv2d(nn.Conv2d): """Conv2d with Weight Standardization. Includes TensorFlow compatible SAME padding. Used for ViT Hybrid model. 
Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight Standardization](https://arxiv.org/abs/1903.10520v2) """ def __init__( self, in_channel, out_channels, kernel_size, stride=1, padding="SAME", dilation=1, groups=1, bias=False, eps=1e-6, ): padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__( in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, ) if is_dynamic: self.pad = DynamicPad2d(kernel_size, stride, dilation) else: self.pad = None self.eps = eps def forward(self, hidden_state): if self.pad is not None: hidden_state = self.pad(hidden_state) weight = nn.functional.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps ).reshape_as(self.weight) hidden_state = nn.functional.conv2d( hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups ) return hidden_state class BitGroupNormActivation(nn.GroupNorm): r""" A module that combines group normalization with an activation function. """ def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True): super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine) if apply_activation: self.activation = ACT2FN[config.hidden_act] else: self.activation = nn.Identity() def forward(self, hidden_state): hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps) hidden_state = self.activation(hidden_state) return hidden_state class DynamicPad2d(nn.Module): r""" A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input hidden states. 
""" def __init__(self, kernel_size, stride, dilation, value=0): super().__init__() # Safety checkers if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) if isinstance(stride, int): stride = (stride, stride) if isinstance(dilation, int): dilation = (dilation, dilation) self.kernel_size = kernel_size self.stride = stride self.dilation = dilation self.value = value def compute_padding(x, kernel_size, stride, dilation): return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) self.compute_padding = compute_padding def __call__(self, input): # Get width and height input_height, input_width = input.size()[-2:] # Compute the padding values padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0]) padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1]) # apply pad if padding_height > 0 or padding_width > 0: input = nn.functional.pad( input, [ padding_width // 2, padding_width - padding_width // 2, padding_height // 2, padding_height - padding_height // 2, ], value=self.value, ) return input class BitMaxPool2d(nn.MaxPool2d): """Tensorflow like 'SAME' wrapper for 2D max pooling""" def __init__( self, kernel_size: int, stride=None, dilation=1, ceil_mode=False, padding=(0, 0), padding_value=0, use_dynamic_padding=True, ): kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation) super().__init__(kernel_size, stride, padding, dilation, ceil_mode) if use_dynamic_padding: self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value) else: self.pad = nn.Identity() def forward(self, hidden_states): hidden_states = self.pad(hidden_states) return nn.functional.max_pool2d( hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode ) class BitEmbeddings(nn.Module): """ BiT Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: BitConfig): super().__init__() self.convolution = WeightStandardizedConv2d( config.num_channels, config.embedding_size, kernel_size=7, stride=2, eps=1e-8, padding=config.global_padding, ) self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding) # Use the same padding strategy as convolutional layers if config.global_padding is not None and config.global_padding.upper() == "SAME": self.pad = nn.Identity() else: self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0) if not config.layer_type == "preactivation": self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size) else: self.norm = nn.Identity() self.num_channels = config.num_channels def forward(self, pixel_values: Tensor) -> Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) embedding = self.convolution(pixel_values) embedding = self.pad(embedding) embedding = self.norm(embedding) embedding = self.pooler(embedding) return embedding # Copied from transformers.models.convnext.modeling_convnext.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit class BitDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) def make_div(value, divisor=8): min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) if new_value < 0.9 * value: new_value += divisor return new_value class BitPreActivationBottleneckLayer(nn.Module): """Pre-activation (v2) bottleneck block. Follows the implementation of "Identity Mappings in Deep Residual Networks": https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua Except it puts the stride on 3x3 conv when available. 
""" def __init__( self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False, ): super().__init__() first_dilation = first_dilation or dilation out_channels = out_channels or in_channels mid_channels = make_div(out_channels * bottle_ratio) if is_first_layer: self.downsample = BitDownsampleConv( config, in_channels, out_channels, stride=stride, preact=True, ) else: self.downsample = None self.norm1 = BitGroupNormActivation(config, in_channels) self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding) self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels) self.conv2 = WeightStandardizedConv2d( mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding ) self.norm3 = BitGroupNormActivation(config, mid_channels) self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding) self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, hidden_states): hidden_states_preact = self.norm1(hidden_states) # shortcut branch shortcut = hidden_states if self.downsample is not None: shortcut = self.downsample(hidden_states_preact) # residual branch hidden_states = self.conv1(hidden_states_preact) hidden_states = self.conv2(self.norm2(hidden_states)) hidden_states = self.conv3(self.norm3(hidden_states)) hidden_states = self.drop_path(hidden_states) return hidden_states + shortcut class BitBottleneckLayer(nn.Module): """Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid.""" def __init__( self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False, ): super().__init__() first_dilation = first_dilation or dilation out_channels = out_channels or in_channels mid_chs = make_div(out_channels * bottle_ratio) if is_first_layer: self.downsample = BitDownsampleConv( config, in_channels, out_channels, stride=stride, preact=False, ) else: self.downsample = None self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-8, padding=config.global_padding) self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs) self.conv2 = WeightStandardizedConv2d( mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups, eps=1e-8, padding=config.global_padding, ) self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs) self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-8, padding=config.global_padding) self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False) self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_states): # shortcut branch shortcut = hidden_states if self.downsample is not None: shortcut = self.downsample(hidden_states) # residual hidden_states = self.conv1(hidden_states) hidden_states = self.norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = self.conv3(hidden_states) hidden_states = self.norm3(hidden_states) hidden_states = self.drop_path(hidden_states) hidden_states = self.activation(hidden_states + shortcut) return hidden_states class BitDownsampleConv(nn.Module): def __init__( self, config, in_channels, out_channels, 
stride=1, preact=True, ): super().__init__() self.conv = WeightStandardizedConv2d( in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding ) self.norm = ( nn.Identity() if preact else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False) ) def forward(self, x): return self.norm(self.conv(x)) class BitStage(nn.Module): """ A ResNet v2 stage composed by stacked layers. """ def __init__( self, config, in_channels, out_channels, stride, dilation, depth, bottle_ratio=0.25, layer_dropout=None, ): super().__init__() first_dilation = 1 if dilation in (1, 2) else 2 # Get the layer type if config.layer_type == "bottleneck": layer_cls = BitBottleneckLayer else: layer_cls = BitPreActivationBottleneckLayer prev_chs = in_channels self.layers = nn.Sequential() for layer_idx in range(depth): # Get the current hyper-parameters stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters( layer_idx, stride, layer_dropout ) self.layers.add_module( str(layer_idx), layer_cls( config, prev_chs, out_channels, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, first_dilation=first_dilation, drop_path_rate=drop_path_rate, is_first_layer=is_first_layer, ), ) prev_chs = out_channels first_dilation = dilation def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout): r""" Get the new hyper-parameters with respect to the previous ones and the index of the current layer. """ if layer_dropout: drop_path_rate = layer_dropout[layer_idx] else: drop_path_rate = 0.0 if layer_idx != 0: stride = 1 is_first_layer = layer_idx == 0 return stride, drop_path_rate, is_first_layer def forward(self, input: Tensor) -> Tensor: hidden_state = input for _, layer in enumerate(self.layers): hidden_state = layer(hidden_state) return hidden_state class BitEncoder(nn.Module): def __init__(self, config: BitConfig): super().__init__() self.stages = nn.ModuleList([]) prev_chs = config.embedding_size # These needs to stay hardcoded current_stride = 4 dilation = 1 layer_dropouts = [ x.tolist() for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths) ] for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate( zip(config.depths, config.hidden_sizes, layer_dropouts) ): # Get the updated hyper params out_channels, stride, dilation = self._get_updated_hyperparameters( stage_idx, current_stride, current_hidden_size, dilation, config ) stage = BitStage( config, prev_chs, out_channels, stride=stride, dilation=dilation, depth=current_depth, layer_dropout=layer_dropout, ) prev_chs = out_channels current_stride *= stride self.stages.add_module(str(stage_idx), stage) def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config): out_channels = make_div(current_hidden_size * config.width_factor) stride = 1 if stage_idx == 0 else 2 if current_stride >= config.output_stride: dilation *= stride stride = 1 return out_channels, stride, dilation def forward( self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return 
BaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, ) class BitPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BitConfig base_model_prefix = "bit" main_input_name = "pixel_values" def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) BIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BitConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare BiT model outputting raw features without any specific head on top.", BIT_START_DOCSTRING, ) class BitModel(BitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = BitEmbeddings(config) self.encoder = BitEncoder(config) self.norm = ( BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1]) if config.layer_type == "preactivation" else nn.Identity() ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.norm(last_hidden_state) pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ BiT Model with an image classification head on top (a linear layer on top 
of the pooled features), e.g. for ImageNet. """, BIT_START_DOCSTRING, ) class BitForImageClassification(BitPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bit = BitModel(config) # classification head self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings( """ BiT backbone, to be used with frameworks like DETR and MaskFormer. 
""", BIT_START_DOCSTRING, ) class BitBackbone(BitPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.bit = BitModel(config) self.num_features = [config.embedding_size] + config.hidden_sizes # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("google/resnetnv2-50") >>> model = AutoBackbone.from_pretrained("google/resnetnv2-50") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/bit/configuration_bit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BiT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json", } class BitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate an BiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BiT [google/bit-50](https://huggingface.co/google/bit-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. embedding_size (`int`, *optional*, defaults to 64): Dimensionality (hidden size) for the embedding layer. hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`): Dimensionality (hidden size) at each stage. depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`): Depth (number of layers) for each stage. layer_type (`str`, *optional*, defaults to `"preactivation"`): The layer to use, it can be either `"preactivation"` or `"bottleneck"`. hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. global_padding (`str`, *optional*): Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`. num_groups (`int`, *optional*, defaults to 32): Number of groups used for the `BitGroupNormActivation` layers. drop_path_rate (`float`, *optional*, defaults to 0.0): The drop path rate for the stochastic depth. embedding_dynamic_padding (`bool`, *optional*, defaults to `False`): Whether or not to make use of dynamic padding for the embedding layer. output_stride (`int`, *optional*, defaults to 32): The output stride of the model. width_factor (`int`, *optional*, defaults to 1): The width factor for the model. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). 
If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import BitConfig, BitModel >>> # Initializing a BiT bit-50 style configuration >>> configuration = BitConfig() >>> # Initializing a model (with random weights) from the bit-50 style configuration >>> model = BitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "bit" layer_types = ["preactivation", "bottleneck"] supported_padding = ["SAME", "VALID"] def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") if global_padding is not None: if global_padding.upper() in self.supported_padding: global_padding = global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported") self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.layer_type = layer_type self.hidden_act = hidden_act self.global_padding = global_padding self.num_groups = num_groups self.drop_path_rate = drop_path_rate self.embedding_dynamic_padding = embedding_dynamic_padding self.output_stride = output_stride self.width_factor = width_factor self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
0
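To make the backbone-related arguments concrete, a minimal sketch (assuming a `transformers` install that ships `BitConfig`; the selected stages are arbitrary) of how `stage_names`, `out_features` and `out_indices` end up aligned:

```python
from transformers import BitConfig

# `stage_names` is derived from `depths`; the requested `out_features`
# are aligned to matching `out_indices` by the BackboneConfigMixin helper.
config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])

print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices of the selected stages within `stage_names`, here 2 and 4
```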
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/bit/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_bit"] = [ "BIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BitForImageClassification", "BitModel", "BitPreTrainedModel", "BitBackbone", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_bit"] = ["BitImageProcessor"] if TYPE_CHECKING: from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bit import ( BIT_PRETRAINED_MODEL_ARCHIVE_LIST, BitBackbone, BitForImageClassification, BitModel, BitPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bit import BitImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
0
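A hedged sketch of what the lazy import structure above provides to end users: symbols are resolved on first access, and the torch- and vision-gated entries are only importable when those optional dependencies are installed.

```python
from transformers.utils import is_torch_available, is_vision_available

# The configuration has no heavy dependencies and is always exported.
from transformers import BitConfig

# Model classes require torch ...
if is_torch_available():
    from transformers import BitBackbone, BitForImageClassification, BitModel

# ... and the image processor requires the vision extras (Pillow).
if is_vision_available():
    from transformers import BitImageProcessor
```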
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/bit/convert_bit_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BiT checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_config(model_name): repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} conv_layer = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" config = BitConfig( conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, ) return config def rename_key(name): if "stem.conv" in name: name = name.replace("stem.conv", "bit.embedder.convolution") if "blocks" in name: name = name.replace("blocks", "layers") if "head.fc" in name: name = name.replace("head.fc", "classifier.1") if name.startswith("norm"): name = "bit." + name if "bit" not in name and "classifier" not in name: name = "bit.encoder." + name return name # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our BiT structure. 
""" # define default BiT configuration config = get_config(model_name) # load original model from timm timm_model = create_model(model_name, pretrained=True) timm_model.eval() # load state_dict of original model state_dict = timm_model.state_dict() for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val.squeeze() if "head" in key else val # load HuggingFace model model = BitForImageClassification(config) model.eval() model.load_state_dict(state_dict) # create image processor transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } processor = BitImageProcessor( do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = processor(image, return_tensors="pt").pixel_values # verify pixel values assert torch.allclose(timm_pixel_values, pixel_values) # verify logits with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print("Logits:", logits[0, :3]) print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model {model_name} and processor to the hub") model.push_to_hub(f"ybelkada/{model_name}") processor.push_to_hub(f"ybelkada/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) args = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
0
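A usage sketch for the conversion script above. The command line mirrors the argparse options it defines, and the assertions illustrate the timm-to-HuggingFace key remapping performed by `rename_key` (this assumes `timm` is installed and that the script is importable, e.g. run from its own directory; the checkpoint name is just the default).

```python
# Run the converter as a script:
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub

# The key remapping can also be sanity-checked in isolation:
from convert_bit_to_pytorch import rename_key

assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("head.fc.weight") == "classifier.1.weight"
assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"
```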
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/bit/image_processing_bit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for BiT.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL class BitImageProcessor(BaseImageProcessor): r""" Constructs a BiT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize: Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. 
do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" default_to_square = True if "shortest_edge" in size: size = size["shortest_edge"] default_to_square = False elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, default_to_square=default_to_square, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name="size", default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
0
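A minimal end-to-end sketch for the processor above (it assumes Pillow, requests and torch are available; the URL is the same COCO test image used by the conversion script).

```python
import requests
from PIL import Image

from transformers import BitImageProcessor

processor = BitImageProcessor(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Resize (shortest edge -> 256), center crop to 224x224, rescale to [0, 1],
# then normalize with the OpenAI CLIP mean/std defaults.
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```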
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/switch_transformers/modeling_switch_transformers.py
# coding=utf-8 # Copyright 2022 SwitchTransformers Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch SwitchTransformers model.""" import copy import math import warnings from typing import Optional, Tuple, Union import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import ( MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_switch_transformers import SwitchTransformersConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "SwitchTransformersConfig" _CHECKPOINT_FOR_DOC = "google/switch-base-8" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/switch-base-8", "google/switch-base-16", "google/switch-base-32", "google/switch-base-64", "google/switch-base-128", "google/switch-base-256", "google/switch-large-128", "google/switch-xxl-128", "google/switch-c-2048", # See all SwitchTransformers models at https://huggingface.co/models?filter=switch_transformers ] def router_z_loss_func(router_logits: torch.Tensor) -> float: r""" Compute the router z-loss implemented in PyTorch. The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906). It encourages router logits to remain small in an effort to improve stability. Args: router_logits (`float`): Input logits of shape [batch_size, sequence_length, num_experts] Returns: Scalar router z-loss. """ num_groups, tokens_per_group, _ = router_logits.shape log_z = torch.logsumexp(router_logits, dim=-1) z_loss = log_z**2 return torch.sum(z_loss) / (num_groups * tokens_per_group) def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: router_probs (`torch.Tensor`): Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts]. expert_indices (`torch.Tensor`): Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token. Returns: The auxiliary loss. 
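Example (a minimal sketch; batch, sequence and expert sizes are arbitrary):

```python
>>> import torch
>>> router_probs = torch.softmax(torch.randn(2, 8, 4), dim=-1)  # [batch_size, sequence_length, num_experts]
>>> expert_indices = torch.argmax(router_probs, dim=-1)  # [batch_size, sequence_length]
>>> aux_loss = load_balancing_loss_func(router_probs, expert_indices)
```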
""" num_experts = router_probs.shape[-1] # cast the expert indices to int64, otherwise one-hot encoding will fail if expert_indices.dtype != torch.int64: expert_indices = expert_indices.to(torch.int64) if len(expert_indices.shape) == 2: expert_indices = expert_indices.unsqueeze(2) expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts) # For a given token, determine if it was routed to a given expert. expert_mask = torch.max(expert_mask, axis=-2).values # cast to float32 otherwise mean will fail expert_mask = expert_mask.to(torch.float32) tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) class SwitchTransformersTop1Router(nn.Module): """ Router using tokens choose top-1 experts assignment. This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. """ def __init__(self, config: SwitchTransformersConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.jitter_noise = config.router_jitter_noise self.ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r""" Computes router probabilities from input hidden states. Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: router_probabilities (`torch.Tensor`): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor`): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. """ # float32 is used to ensure stability. See the discussion of "selective precision" in # https://arxiv.org/abs/2101.03961. # We also store the previous dtype to cast back the output to the previous dtype self.input_dtype = hidden_states.dtype hidden_states = hidden_states.to(self.dtype) if self.training and self.jitter_noise > 0: # Multiply the token inputs by the uniform distribution - adding some noise hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() router_logits = self.classifier(hidden_states) # Apply Softmax and cast back to the original `dtype` router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) return router_probabilities, router_logits def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. 
""" if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def forward(self, hidden_states: torch.Tensor) -> Tuple: r""" Generic forward function for every Router class. Each Router expects to have the same input hidden states (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the number of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert. Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned to an expert. Then each Router class will have to define its own `_compute_routing_instructions`. Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs and the router logits. The router probabilities and logits are required to compute the loss. """ router_probs, router_logits = self._compute_router_probabilities(hidden_states) expert_index = torch.argmax(router_probs, dim=-1) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) # Mask tokens outside expert capacity. Sum over each sequence token_priority = torch.cumsum(expert_index, dim=-2) # mask if the token routed to to the expert will overflow expert_capacity_mask = token_priority <= self.expert_capacity expert_index = expert_index * expert_capacity_mask router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) return expert_index, router_probs, router_logits # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->SwitchTransformers class SwitchTransformersLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the SwitchTransformers style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # SwitchTransformers uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states ALL_LAYERNORM_LAYERS.append(SwitchTransformersLayerNorm) # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->SwitchTransformers class SwitchTransformersDenseActDense(nn.Module): def __init__(self, config: SwitchTransformersConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class SwitchTransformersSparseMLP(nn.Module): r""" Implementation of the Switch Transformers Sparse MLP module. """ def __init__(self, config: SwitchTransformersConfig, expert_class: nn.Module = SwitchTransformersDenseActDense): super().__init__() # Step 1: Get the correct router according to its class self.router = SwitchTransformersTop1Router(config) # Step 2: Get the experts self.experts = nn.ModuleDict() for idx in range(config.num_experts): self.experts[f"expert_{idx}"] = expert_class(config) def forward(self, hidden_states): r""" Hold on, this will be slightly tricky to understand In the correct order, a MoE layer does the following: 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)` and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor). 2- Dispatch the tokens to its associated experts. We do a classic for loop over the experts and assign for each expert the corresponding hidden states. """ # Step 1: Get the router_mask from the router as wel as the probabilities router_mask, router_probs, router_logits = self.router(hidden_states) expert_index = torch.argmax(router_mask, dim=-1) # The routers introduced might not always map all the tokens, to a router, which means that some hidden states # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the seleced ones. next_states = hidden_states.clone() for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, :, idx].bool() next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) class SwitchTransformersLayerFF(nn.Module): r""" Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module. Parameters: config : ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. is_sparse (`bool`): Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not """ def __init__(self, config: SwitchTransformersConfig, is_sparse=False): super().__init__() self.is_sparse = is_sparse # Check if it is a sparse layer, if not then it is a dense layer if not self.is_sparse: self.mlp = SwitchTransformersDenseActDense(config) else: self.mlp = SwitchTransformersSparseMLP(config) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, output_router_logits): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.mlp(forwarded_states) if isinstance(forwarded_states, tuple): forwarded_states, router_tuple = forwarded_states else: router_tuple = None output = hidden_states + self.dropout(forwarded_states) if output_router_logits and router_tuple is not None: output = (output, router_tuple) return output # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->SwitchTransformers class SwitchTransformersAttention(nn.Module): def __init__(self, config: SwitchTransformersConfig, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. 
We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + 
(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->SwitchTransformers class SwitchTransformersLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = SwitchTransformersAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->SwitchTransformers class SwitchTransformersLayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = SwitchTransformersAttention(config, has_relative_attention_bias=False) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class SwitchTransformersBlock(nn.Module): def __init__(self, config, has_relative_attention_bias=False, is_sparse=False): super().__init__() self.is_decoder = config.is_decoder self.is_sparse = is_sparse self.layer = nn.ModuleList() self.layer.append( SwitchTransformersLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias) ) if self.is_decoder: self.layer.append(SwitchTransformersLayerCrossAttention(config)) self.layer.append(SwitchTransformersLayerFF(config, is_sparse=self.is_sparse)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, output_router_logits=True, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. 
Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states, output_router_logits) if isinstance(hidden_states, tuple): hidden_states, router_tuple = hidden_states else: router_tuple = (torch.zeros((1,), device=hidden_states.device, dtype=torch.int64),) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs + (router_tuple,) else: outputs = outputs + attention_outputs + (router_tuple,) return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights), (router_tuple) class SwitchTransformersPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple 
interface for downloading and loading pretrained models. """ config_class = SwitchTransformersConfig base_model_prefix = "switch_transformers" supports_gradient_checkpointing = True _no_split_modules = ["SwitchTransformersBlock"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, SwitchTransformersLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, (SwitchTransformersModel, SwitchTransformersForConditionalGeneration, SwitchTransformersEncoderModel), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, SwitchTransformersDenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, SwitchTransformersAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) elif isinstance(module, SwitchTransformersSparseMLP): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( 
"self.model.config.decoder_start_token_id has to be defined. In SwitchTransformers it is usually set" " to the pad_token_id. See SwitchTransformers docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class SwitchTransformersStack(SwitchTransformersPreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.is_decoder = config.is_decoder sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers self.block = nn.ModuleList() for i in range(config.num_layers): is_sparse = (i % sparse_step == 1) if sparse_step > 0 else False self.block.append( SwitchTransformersBlock(config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse) ) self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.device_map = None self.gradient_checkpointing = False def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_router_logits=True, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask 
seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_router_probs = () if output_router_logits else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, output_router_logits=output_router_logits, ) router_probs = layer_outputs[-1] layer_outputs = layer_outputs[:-1] # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) if output_router_logits: all_router_probs = all_router_probs + (router_probs,) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, 
cross_attentions=all_cross_attentions, router_probs=all_router_probs, ) SWITCH_TRANSFORMERS_START_DOCSTRING = r""" The SWITCH_TRANSFORMERS model was proposed in [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by [William Fedus](https://arxiv.org/search/cs?searchtype=author&query=Fedus%2C+W), [Barret Zoph](https://arxiv.org/search/cs?searchtype=author&query=Zoph%2C+B), and [Noam Shazeer](https://arxiv.org/search/cs?searchtype=author&query=Shazeer%2C+N). It's an encoder-decoder T5-like model with sparse Feed Forward that stands for Mixture of Experts (MoE) architecture. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SWITCH_TRANSFORMERS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [SWITCH_TRANSFORMERS Training](./switch_transformers#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS Training](./switch_transformers#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" SWITCH_TRANSFORMERS_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [SWITCH_TRANSFORMERS Training](./switch_transformers#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
""" @add_start_docstrings( "The bare SWITCH_TRANSFORMERS Model transformer outputting raw hidden-states without any specific head on top.", SWITCH_TRANSFORMERS_START_DOCSTRING, ) class SwitchTransformersModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False self.decoder = SwitchTransformersStack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, SwitchTransformersModel >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8") >>> model = SwitchTransformersModel.from_pretrained("google/switch-base-8") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for SwitchTransformersModel. >>> # This is not needed for torch's SwitchTransformersForConditionalGeneration as it does this internally using labels arg. 
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if ( output_router_logits and self.config.num_sparse_encoder_layers == 0 and self.config.num_sparse_decoder_layers == 0 ): raise ValueError( "You asked to return `output_router_logits` but the transformer is dense, and does " " not contain any sparse MLP Layers. Set `output_router_logits = False` and restart" ) # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqMoEModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, ) @add_start_docstrings( """SWITCH_TRANSFORMERS Model with a `language modeling` head on top.""", SWITCH_TRANSFORMERS_START_DOCSTRING ) class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config,
self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = SwitchTransformersStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = True, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqMoEOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8") >>> model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> # . To, let’s say you have a dog. 
To summarize: >>> # Since the model has been trained on MLM, this will output gibberish ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None encoder_z_loss = None encoder_aux_loss = None decoder_z_loss = None decoder_aux_loss = None if output_router_logits: # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder if self.encoder.config.encoder_sparse_step > 1: encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_outputs[-1]) encoder_z_loss = router_z_loss_func(encoder_router_logits) encoder_router_probs = nn.Softmax(dim=-1)(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_probs, encoder_expert_indexes) else: encoder_z_loss = 0 encoder_aux_loss = 0 if self.decoder.config.decoder_sparse_step > 1: decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_outputs[-1]) decoder_z_loss = router_z_loss_func(decoder_router_logits) decoder_router_probs = nn.Softmax(dim=-1)(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_probs, decoder_expert_indexes) else: decoder_z_loss = 0 decoder_aux_loss = 0 if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = 
labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits: z_loss = self.router_z_loss_coef * (encoder_z_loss + decoder_z_loss) aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + z_loss + aux_loss if not return_dict: output = (lm_logits,) if output_router_logits: output += (encoder_z_loss, encoder_aux_loss, decoder_z_loss, decoder_aux_loss) output += (*decoder_outputs[1:], *encoder_outputs) return ((loss,) + output) if loss is not None else output return Seq2SeqMoEOutput( loss=loss, logits=lm_logits, encoder_z_loss=encoder_z_loss, encoder_aux_loss=encoder_aux_loss, decoder_z_loss=decoder_z_loss, decoder_aux_loss=decoder_aux_loss, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, ) def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if len(router_output[0].shape) > 1: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( "expected reordered_layer_past_states to have the same shape than layer_past_states, " f"but got 
{reordered_layer_past_states[0].shape} and {layer_past_states[0].shape}" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( "expected layer_past_states to have the same length as reordered_layer_past_states, " f"but got {len(layer_past_states)} and {len(reordered_layer_past_states)}" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare SWITCH_TRANSFORMERS Model transformer outputting encoder's raw hidden-states without any specific head" " on top.", SWITCH_TRANSFORMERS_START_DOCSTRING, ) class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MoEModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = True, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], MoEModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, SwitchTransformersEncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8") >>> model = SwitchTransformersEncoderModel.from_pretrained("google/switch-base-8") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) return encoder_outputs
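# ---------------------------------------------------------------------------
# Illustrative smoke-test sketch (not part of the original file): run a tiny,
# randomly initialised Switch Transformer end to end. Every size below is a
# made-up small value chosen only to keep the example fast, and
# `decoder_start_token_id` is set to 0 to mirror the released checkpoints,
# where it equals the pad token. The returned loss already folds in the scaled
# router z-loss and auxiliary loss when `output_router_logits` is left at its
# default of True.
def _tiny_switch_forward_demo():
    config = SwitchTransformersConfig(
        vocab_size=128,
        d_model=32,
        d_kv=8,
        d_ff=64,
        num_heads=4,
        num_layers=2,
        num_sparse_encoder_layers=1,
        num_decoder_layers=2,
        num_sparse_decoder_layers=1,
        num_experts=4,
        decoder_start_token_id=0,
    )
    model = SwitchTransformersForConditionalGeneration(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    labels = torch.randint(0, config.vocab_size, (1, 5))
    outputs = model(input_ids=input_ids, labels=labels)
    print(outputs.loss, outputs.logits.shape)  # logits: (1, 5, vocab_size)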
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/switch_transformers/convert_big_switch.py
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def rename_base_flax_keys(flax_key_tuple, flax_tensor): """ Post renaming of basic JAX keys to pytorch. """ if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer flax_key_tuple = flax_key_tuple[:-1] + ("weight",) flax_tensor = torch.permute(flax_tensor, (0, 2, 1)) elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple): # linear layer flax_key_tuple = flax_key_tuple[:-1] + ("weight",) flax_tensor = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: flax_key_tuple = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path): if "metadata" in layer: split_layer = layer.split("metadata") curr_real_layer_name = "".join(split_layer[0])[:-1] split_layer = [tuple(("metadata" + split_layer[1]).split("/"))] elif "kvstore" in layer: split_layer = layer.split("kvstore") curr_real_layer_name = "".join(split_layer[0])[:-1] split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))] else: split_layer = layer.split("/") curr_real_layer_name = "/".join(split_layer[:-1]) split_layer[-1] = (split_layer[-1],) if "kvstore/path" in layer: content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}" elif "kvstore/driver" in layer: content = "file" else: content = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def rename_and_save_block(current_block, save_path): current_block = rename_keys(current_block) new_current_block = {} for k, v in current_block.items(): new_current_block[k.replace("/", ".")] = v current_block = new_current_block torch.save(current_block, save_path) def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME): max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = {} current_block_size = 0 total_size = 0 os.makedirs(dump_path, exist_ok=True) with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp: checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"] checkpoint_info = flatten_dict(checkpoint_info, sep="/") all_layers = {} for layer in checkpoint_info.keys(): curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict( layer, checkpoint_info, switch_checkpoint_path ) if curr_real_layer_name in all_layers: all_layers[curr_real_layer_name][split_layer[-1]] = content else: all_layers[curr_real_layer_name] = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result() raw_weights = torch.tensor(raw_weights) weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype) # use the renaming pattern from the small conversion scripts key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights) key = "/".join(key) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: save_path = os.path.join( dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin") ) rename_and_save_block(current_block, save_path) sharded_state_dicts.append(current_block.keys()) del current_block current_block = {} current_block_size = 0 current_block[key] = raw_weights.to(getattr(torch, dtype)) current_block_size += weight_size total_size += weight_size # Add the last block save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")) rename_and_save_block(current_block, save_path) sharded_state_dicts.append(current_block.keys()) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = weights_name.replace( ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin" ) # len(sharded_state_dicts):05d} temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin")) os.rename(temp_filename, os.path.join(dump_path, shard_file)) shards[shard_file] = shard for key in shard: weight_map[key] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) return metadata, index if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) args = parser.parse_args() shard_on_the_fly( args.switch_t5x_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def sanity_check(): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer config = SwitchTransformersConfig.from_pretrained("google/switch-base-8") config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted") model = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto" ) tokenizer = T5Tokenizer.from_pretrained("t5-small") text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." input_ids = tokenizer(text, return_tensors="pt").input_ids out = model.generate(input_ids, decoder_start_token_id=0) print(tokenizer.decode(out[0]))
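# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script). Both paths below
# are placeholders, not real checkpoints. `shard_on_the_fly` writes numbered
# `pytorch_model-XXXXX-of-XXXXX.bin` shards plus a `pytorch_model.bin.index.json`
# file into `dump_path`, which `from_pretrained` can then load as a sharded
# checkpoint.
def example_shard_call():
    metadata, index = shard_on_the_fly(
        "/path/to/t5x/switch-checkpoint",  # placeholder checkpoint directory
        "/path/to/converted-model",  # placeholder output directory
        max_shard_size="10GB",
        dtype="bfloat16",
    )
    if index is not None:  # more than one shard was written
        print(f"wrote {len(index['weight_map'])} tensors, {metadata['total_size']} bytes in total")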
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/switch_transformers/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_switch_transformers": [ "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwitchTransformersConfig", "SwitchTransformersOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_switch_transformers"] = [ "SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST", "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", "SwitchTransformersPreTrainedModel", "SwitchTransformersTop1Router", "SwitchTransformersSparseMLP", ] if TYPE_CHECKING: from .configuration_switch_transformers import ( SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP, SwitchTransformersConfig, SwitchTransformersOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_switch_transformers import ( SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST, SwitchTransformersEncoderModel, SwitchTransformersForConditionalGeneration, SwitchTransformersModel, SwitchTransformersPreTrainedModel, SwitchTransformersSparseMLP, SwitchTransformersTop1Router, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
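# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): because of the `_LazyModule`
# registration above, importing a symbol from this package only loads the
# corresponding submodule on first access, e.g.
#
#     from transformers.models.switch_transformers import SwitchTransformersConfig
#     config = SwitchTransformersConfig()  # configuration module loaded lazily here
#
# whereas the torch-dependent classes (e.g. `SwitchTransformersModel`) are only
# exposed when `is_torch_available()` returns True.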
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SwitchTransformersX checkpoints from the original repository to JAX/FLAX model.""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from t5x import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument MOE_LAYER_NAME_MAPPING = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def rename_keys(s_dict): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model keys = list(s_dict.keys()) for key in keys: layer_to_block_of_layer = r".*/layers_(\d+)" new_key = key if re.match(layer_to_block_of_layer, key): new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key) layer_to_block_of_layer = r"(encoder|decoder)\/" if re.match(layer_to_block_of_layer, key): groups = re.match(layer_to_block_of_layer, new_key).groups() if groups[0] == "encoder": new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key) elif groups[0] == "decoder": new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: new_key = new_key.replace(old_key, temp_key) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys()): if "expert" in key: num_experts = s_dict[key].shape[0] expert_weihts = s_dict[key] for idx in range(num_experts): s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx] print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}") s_dict.pop(key) return s_dict GIN_TO_CONFIG_MAPPING = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def convert_gin_to_config(gin_file, num_experts): # Convert a google style config to the hugging face fromat import regex as re with open(gin_file, "r") as f: raw_gin = f.read() regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin) args = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value) activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0] args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1]) args["num_experts"] = num_experts config = SwitchTransformersConfig(**args) return config def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8 ): # Initialise PyTorch model print(f"Loading flax weights from : {flax_checkpoint_path}") flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path) if gin_file is not None: config = convert_gin_to_config(gin_file, num_experts) else: config = SwitchTransformersConfig.from_pretrained(config_file) pt_model = SwitchTransformersForConditionalGeneration(config) flax_params = flax_params["target"] flax_params = flatten_dict(flax_params, sep="/") flax_params = rename_keys(flax_params) flax_params = unflatten_dict(flax_params, sep="/") # Load the flax params in the PT model load_flax_weights_in_pytorch_model(pt_model, flax_params) print(f"Save PyTorch model to {pytorch_dump_path}") pt_model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") args = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
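# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): trace a single made-up
# flax-style parameter name through `rename_keys`. The tensor value is a dummy
# placeholder, since only the key matters for this particular mapping.
def _rename_keys_demo():
    toy = {"encoder/layers_0/attention/query/kernel": 0}
    renamed = rename_keys(toy)
    # rename_keys prints the mapping, ending up at the Hugging Face naming scheme:
    # encoder/layers_0/attention/query/kernel -> encoder/block/0/layer/0/SelfAttention/q/kernel
    assert "encoder/block/0/layer/0/SelfAttention/q/kernel" in renamed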
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/switch_transformers/configuration_switch_transformers.py
# coding=utf-8 # Copyright 2022, Google and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Switch Transformers model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json", } class SwitchTransformersConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 32128): Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`]. d_model (`int`, *optional*, defaults to 768): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. expert_capacity (`int`, *optional*, defaults to 64): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. num_layers (`int`, *optional*, defaults to 12): Number of dense hidden layers in the Transformer encoder layer. num_sparse_encoder_layers (`int`, *optional*, defaults to 3): Number of sparse (MoE) dense hidden layers in the Transformer encoder layer. num_decoder_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_sparse_decoder_layers (`int`, *optional*, defaults to 3): Number of sparse (MoE) dense hidden layers in the Transformer decoder layer. num_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 8): Number of experts for each SwitchTransformer layer. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router. router_jitter_noise (`float`, *optional*, defaults to 0.01): Amount of noise to add to the router. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. 
It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. router_z_loss_coef (`float`, *optional*, defaults to 0.001): The z loss factor for the total loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1 uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`. add_router_probs (`bool`, *optional*, defaults to `False`): Whether to output router probabilities to compute router auxiliary loss. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "switch_transformers" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, dense_act_fn="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_sparse_encoder_layers = num_sparse_encoder_layers self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_sparse_decoder_layers = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers else: self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers else: self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers self.num_heads = num_heads self.num_experts = num_experts self.expert_capacity = expert_capacity self.router_bias = router_bias self.router_jitter_noise = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.use_cache = use_cache self.add_router_probs = add_router_probs self.router_z_loss_coef = router_z_loss_coef self.router_aux_loss_coef = router_aux_loss_coef self.dense_act_fn = dense_act_fn super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
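To make the sparse-step bookkeeping above concrete, here is a short usage sketch (assuming `SwitchTransformersConfig` is importable from an installed `transformers`); the printed values follow directly from the default arguments shown in `__init__`:

```python
from transformers import SwitchTransformersConfig

# Defaults: 12 encoder/decoder layers with 3 sparse (MoE) layers each,
# so a sparse block is placed every 12 // 3 = 4 layers.
config = SwitchTransformersConfig()
print(config.num_experts)          # 8
print(config.encoder_sparse_step)  # 4
print(config.decoder_sparse_step)  # 4

# Setting the sparse layer counts to 0 triggers the fallback branch above,
# which effectively yields a dense model (sparse_step == num_layers).
dense_config = SwitchTransformersConfig(num_sparse_encoder_layers=0, num_sparse_decoder_layers=0)
print(dense_config.encoder_sparse_step)  # 12
```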
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Speech2Text2 model.""" import copy import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, logging, replace_return_docstrings from .configuration_speech_to_text_2 import Speech2Text2Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Speech2Text2Config" _CHECKPOINT_FOR_DOC = "facebook/s2t-wav2vec2-large-en-de" SPEECH_TO_TEXT_2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/s2t-wav2vec2-large-en-de", # See all Speech2Text2 models at https://huggingface.co/models?filter=speech2text2 ] # Copied from transformers.models.speech_to_text.modeling_speech_to_text.Speech2TextSinusoidalPositionalEmbedding with Speech2Text->Speech2Text2 class Speech2Text2SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. 
Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text2 class Speech2Text2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[Speech2Text2Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class Speech2Text2DecoderLayer(nn.Module): def __init__(self, config: Speech2Text2Config): super().__init__() self.embed_dim = config.d_model self.self_attn = Speech2Text2Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if config.is_decoder: self.encoder_attn = Speech2Text2Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class Speech2Text2PreTrainedModel(PreTrainedModel): config_class = Speech2Text2Config base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() SPEECH_TO_TEXT_2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Speech2Text2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ class Speech2Text2Decoder(Speech2Text2PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2Text2DecoderLayer`] Args: config: Speech2Text2Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: Speech2Text2Config): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = Speech2Text2SinusoidalPositionalEmbedding( self.max_target_positions, config.d_model, self.padding_idx, ) self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient 
checkpointing. Setting `use_cache =" " False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The Speech2Text2 Model with a language modeling head. Can be used for summarization.", SPEECH_TO_TEXT_2_START_DOCSTRING, ) class Speech2Text2DecoderWrapper(Speech2Text2PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = Speech2Text2Decoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @add_start_docstrings( "The Speech2Text2 Decoder with a language modeling head. 
Can be used as the decoder part of" " [`EncoderDecoderModel`] and [`SpeechEncoderDecoder`].", SPEECH_TO_TEXT_2_START_DOCSTRING, ) class Speech2Text2ForCausalLM(Speech2Text2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = Speech2Text2DecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import ( ... SpeechEncoderDecoderModel, ... Speech2Text2ForCausalLM, ... Wav2Vec2Model, ... Speech2Text2Config, ... Wav2Vec2Config, ... Wav2Vec2FeatureExtractor, ... Speech2Text2Tokenizer, ... ) >>> from datasets import load_dataset >>> feature_extractor = Wav2Vec2FeatureExtractor() >>> tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de") >>> encoder = Wav2Vec2Model(Wav2Vec2Config()) >>> decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) >>> # init random speech2text model >>> model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder) >>> model.config.pad_token_id = tokenizer.pad_token_id >>> model.config.decoder_start_token_id = tokenizer.bos_token_id >>> # pre-process inputs and labels >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... 
) >>> input_values = inputs.input_values >>> decoder_input_ids = tokenizer(ds[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(inputs=input_values, labels=decoder_input_ids).loss >>> # backprop loss >>> loss.backward() # doctest: +IGNORE_RESULT ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
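As a small illustration of the cache handling in `prepare_inputs_for_generation` above, the following standalone sketch (with made-up token IDs) shows how the prompt is trimmed once `past_key_values` already covers part of it:

```python
import torch

# Toy values: 4 tokens generated so far, 3 of them already covered by the cache.
input_ids = torch.tensor([[5, 17, 23, 42]])
past_length = 3

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # some generation methods already pass only the last input ID
    remove_prefix_length = input_ids.shape[1] - 1

trimmed = input_ids[:, remove_prefix_length:]
print(trimmed)  # tensor([[42]]) -- only the newest token is run through the decoder
```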
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Speech2Text2 """ import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class Speech2Text2Processor(ProcessorMixin): r""" Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor. [`Speech2Text2Processor`] offers all the functionalities of [`AutoFeatureExtractor`] and [`Speech2Text2Tokenizer`]. See the [`~Speech2Text2Processor.__call__`] and [`~Speech2Text2Processor.decode`] for more information. Args: feature_extractor (`AutoFeatureExtractor`): An instance of [`AutoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2Text2Tokenizer`): An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "AutoFeatureExtractor" tokenizer_class = "Speech2Text2Tokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2Text2Processor.as_target_processor`] this method forwards all its arguments to Speech2Text2Tokenizer's [`~Speech2Text2Tokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") audio = kwargs.pop("raw_speech") else: audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2Text2Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2Text2Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text2. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False
0
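The `__call__` logic of `Speech2Text2Processor` above (audio handled by the feature extractor, `text` tokenized into `labels`) can be exercised roughly as follows; the checkpoint is the one referenced throughout these files, and the exact output keys depend on the underlying feature extractor:

```python
import numpy as np
from transformers import Speech2Text2Processor

processor = Speech2Text2Processor.from_pretrained("facebook/s2t-wav2vec2-large-en-de")

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz

# Passing audio and text in the same call returns the audio features plus the
# tokenized transcription under "labels", as implemented in __call__ above.
inputs = processor(audio=audio, sampling_rate=16000, text="hallo welt", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_values', 'labels']
```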
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/speech_to_text_2/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _import_structure = { "configuration_speech_to_text_2": ["SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2Text2Config"], "processing_speech_to_text_2": ["Speech2Text2Processor"], "tokenization_speech_to_text_2": ["Speech2Text2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_speech_to_text_2"] = [ "SPEECH_TO_TEXT_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text_2 import SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2Text2Config from .processing_speech_to_text_2 import Speech2Text2Processor from .tokenization_speech_to_text_2 import Speech2Text2Tokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text_2 import ( SPEECH_TO_TEXT_2_PRETRAINED_MODEL_ARCHIVE_LIST, Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
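A brief note on what the `_LazyModule` registration above buys: importing the package itself stays cheap, and the torch-heavy submodules are only materialized when one of the exported names is first accessed. A minimal sketch:

```python
# Accessing an exported name triggers the lazy load of the corresponding submodule.
from transformers.models.speech_to_text_2 import Speech2Text2Config

config = Speech2Text2Config()
print(config.model_type)  # "speech_to_text_2"
```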
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py
# coding=utf-8 # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for Speech2Text2.""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json" ), }, "tokenizer_config_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json" ), }, "merges_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt" ), }, } BPE_TOKEN_MERGES = "</w>" BPE_TOKEN_VOCAB = "@@ " def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs # Speech2Text2 has no max input length PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024} class Speech2Text2Tokenizer(PreTrainedTokenizer): """ Constructs a Speech2Text2Tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs, ): self.do_lower_case = do_lower_case with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") self.bpe_ranks = None self.cache = None else: with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, ) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> Dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n " + BPE_TOKEN_MERGES: word = "\n" + BPE_TOKEN_MERGES if word.endswith(BPE_TOKEN_MERGES): word = word.replace(BPE_TOKEN_MERGES, "") word = word.replace(" ", BPE_TOKEN_VOCAB) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding. " "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." ) if self.do_lower_case: text = text.lower() text = text.split() split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string(self, tokens: List[str]) -> str: """ Converts a list of output tokens into a single string. 
""" # combine tokens string = " ".join(tokens) # make sure @@ tokens are concatenated string = "".join(string.split(BPE_TOKEN_VOCAB)) return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merges_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 if self.bpe_ranks is None: return (vocab_file,) with open(merges_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return (vocab_file, merges_file)
0
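To illustrate `convert_tokens_to_string` above, here is a self-contained sketch (with made-up tokens) of how the `"@@ "` continuation markers emitted by the BPE step are undone when decoding:

```python
BPE_TOKEN_VOCAB = "@@ "

# Made-up tokens: "hal@@" marks a word continuation, so "hal@@ lo" -> "hallo".
tokens = ["hal@@", "lo", "welt"]

string = " ".join(tokens)                        # "hal@@ lo welt"
string = "".join(string.split(BPE_TOKEN_VOCAB))  # "hallo welt"
print(string)                                    # "hallo welt"
```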
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech2Text model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class Speech2Text2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Speech2Text2ForCausalLM`]. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 [facebook/s2t-wav2vec2-large-en-de](https://huggingface.co/facebook/s2t-wav2vec2-large-en-de) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Speech2TextModel`] d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
max_target_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). Example: ```python >>> from transformers import Speech2Text2Config, Speech2Text2ForCausalLM >>> # Initializing a Speech2Text2 s2t_transformer_s style configuration >>> configuration = Speech2Text2Config() >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration >>> model = Speech2Text2ForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "speech_to_text_2" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = decoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.max_target_positions = max_target_positions super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
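Beyond the docstring example above, the `attribute_map` declared on this config is worth a quick illustration: generic attribute names are transparently aliased onto the model-specific ones, so code written against `hidden_size`/`num_attention_heads` keeps working. A short sketch:

```python
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)

# Resolved through attribute_map: hidden_size -> d_model,
# num_attention_heads -> decoder_attention_heads.
print(config.hidden_size)          # 256
print(config.num_attention_heads)  # 4
```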
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/informer/modeling_informer.py
# coding=utf-8 # Copyright 2023 Amazon and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Informer model.""" from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput, ) from ...modeling_utils import PreTrainedModel from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_informer import InformerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "InformerConfig" INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "huggingface/informer-tourism-monthly", # See all Informer models at https://huggingface.co/models?filter=informer ] # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Informer class InformerFeatureEmbedder(nn.Module): """ Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features. """ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: # we slice the last dimension, giving an array of length # self.num_features with shape (N,T) or (N) cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat( [ embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices) ], dim=-1, ) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. 
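    Example (an illustrative sketch; `InformerStdScaler` is internal to this module and is normally
    instantiated by [`InformerModel`] when `config.scaling == "std"`):

    ```python
    >>> import torch
    >>> scaler = InformerStdScaler(InformerConfig())
    >>> data = torch.randn(2, 10, 1)
    >>> observed = torch.ones_like(data)
    >>> scaled, loc, scale = scaler(data, observed)
    >>> (scaled.shape, loc.shape, scale.shape)
    (torch.Size([2, 10, 1]), torch.Size([2, 1, 1]), torch.Size([2, 1, 1]))
    ```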
""" def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. 
if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. """ def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. """ if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. 
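    Example (an illustrative sketch using a standard normal distribution):

    ```python
    >>> import torch
    >>> from torch.distributions import Normal
    >>> dist = Normal(loc=torch.zeros(3), scale=torch.ones(3))
    >>> nll(dist, torch.zeros(3))  # elementwise 0.5 * log(2 * pi)
    tensor([0.9189, 0.9189, 0.9189])
    ```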
""" return -input.log_prob(target) # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Informer class InformerSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None: super().__init__(num_positions, embedding_dim) self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter) -> nn.Parameter: """ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] """ n_pos, dim = out.shape position_enc = np.array( [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] ) out.requires_grad = False # set early to avoid an error in pytorch-1.8+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() return out @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Info class InformerValueEmbedding(nn.Module): def __init__(self, feature_size, d_model): super().__init__() self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False) def forward(self, x): return self.value_projection(x) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Informer class InformerAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[InformerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
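        # Shape recap (illustrative): with bsz=2, num_heads=8, tgt_len=5, head_dim=64 the output went
        # (2 * 8, 5, 64) -> view -> (2, 8, 5, 64) -> transpose -> (2, 5, 8, 64), and the reshape below
        # merges the last two dimensions into embed_dim = 8 * 64 = 512.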
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class InformerProbSparseAttention(nn.Module): """Probabilistic Attention mechanism to select the "active" queries rather than the "lazy" queries and provides a sparse Transformer thus mitigating the quadratic compute and memory requirements of vanilla attention""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, sampling_factor: int = 5, bias: bool = True, ): super().__init__() self.factor = sampling_factor self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) key_states_time_length = key_states.size(1) # L_K log_key_states_time_length = np.ceil(np.log1p(key_states_time_length)).astype("int").item() # log_L_K query_states_time_length = query_states.size(1) # L_Q log_query_states_time_length = np.ceil(np.log1p(query_states_time_length)).astype("int").item() # log_L_Q u_part = min(self.factor * query_states_time_length * log_key_states_time_length, key_states_time_length) u = min(self.factor * log_query_states_time_length, query_states_time_length) if key_states_time_length > 0: index_sample = torch.randint(0, key_states_time_length, (u_part,)) k_sample = key_states[:, index_sample, :] else: k_sample = key_states queries_keys_sample = torch.bmm(query_states, k_sample.transpose(1, 2)) # Q_K_sampled # find the Top_k query with sparsity measurement if u > 0: sparsity_measurement = queries_keys_sample.max(dim=-1)[0] - torch.div( queries_keys_sample.sum(dim=-1), key_states_time_length ) # M top_u_sparsity_measurement = sparsity_measurement.topk(u, sorted=False)[1] # M_top # calculate q_reduce: query_states[:, top_u_sparsity_measurement] dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1) q_reduce = query_states[dim_for_slice, top_u_sparsity_measurement] else: q_reduce = query_states top_u_sparsity_measurement = None # Use q_reduce to calculate attention weights attn_weights = torch.bmm(q_reduce, key_states.transpose(1, 2)) src_len = key_states.size(1) if attn_weights.size() != (bsz * self.num_heads, u, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, u, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) prob_mask = attention_mask.expand(bsz, self.num_heads, tgt_len, src_len).reshape( bsz * self.num_heads, tgt_len, src_len ) if top_u_sparsity_measurement is not None: dim_for_slice = torch.arange(prob_mask.size(0)).unsqueeze(-1) prob_mask = prob_mask[dim_for_slice, top_u_sparsity_measurement, :] attn_weights = attn_weights.view(bsz, self.num_heads, u, src_len) + prob_mask.view( bsz, self.num_heads, u, src_len ) attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, u, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, u, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, u, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) # calculate context for updating the attn_output, based on: # https://github.com/zhouhaoyi/Informer2020/blob/ac59c7447135473fb2aafeafe94395f884d5c7a5/models/attn.py#L74 if self.is_decoder: # cast to float32 before operation to avoid overflow context = value_states.cumsum(dim=-2, dtype=torch.float32).to(value_states.dtype) else: v_mean_dim_time = value_states.mean(dim=-2) context = ( v_mean_dim_time.unsqueeze(dim=1) .expand(bsz * self.num_heads, query_states_time_length, v_mean_dim_time.size(-1)) .clone() ) if top_u_sparsity_measurement is not None: # update context: copy the attention output to the context at top_u_sparsity_measurement index dim_for_slice = torch.arange(context.size(0)).unsqueeze(-1) context[dim_for_slice, top_u_sparsity_measurement, :] = attn_output attn_output = context if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # source: https://github.com/zhouhaoyi/Informer2020/blob/main/models/encoder.py class InformerConvLayer(nn.Module): def __init__(self, c_in): super().__init__() self.downConv = nn.Conv1d( in_channels=c_in, out_channels=c_in, kernel_size=3, padding=1, padding_mode="circular", ) self.norm = nn.BatchNorm1d(c_in) self.activation = nn.ELU() self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.downConv(x.permute(0, 2, 1)) x = self.norm(x) x = self.activation(x) x = self.maxPool(x) x = x.transpose(1, 2) return x class InformerEncoderLayer(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.embed_dim = config.d_model if config.attention_type == "prob": self.self_attn = InformerProbSparseAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor, ) else: self.self_attn = InformerAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ 
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class InformerDecoderLayer(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.embed_dim = config.d_model if config.attention_type == "prob": self.self_attn = InformerProbSparseAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor, is_decoder=True, ) else: self.self_attn = InformerAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = InformerAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where 
padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class InformerPreTrainedModel(PreTrainedModel): config_class = InformerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif 
isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() INFORMER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TimeSeriesTransformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ INFORMER_INPUTS_DOCSTRING = r""" Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, that serve as context in order to predict the future. The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of the past. The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags). Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`): Required time features, which the model internally will add to `past_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must but known at prediction time. The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`. 
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): Optional static categorical features for which the model will learn an embedding, which it will add to the values of the time series. Static categorical features are features which have the same value for all time steps (static over time). A typical example of a static categorical feature is a time series ID. static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): Optional static real features which the model will add to the values of the time series. Static real features are features which have the same value for all time steps (static over time). A typical example of a static real feature is promotion information. future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*): Future values of the time series, that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. The sequence length here is equal to `prediction_length`. See the demo notebook and code snippets for details. Optionally, during training any missing values need to be replaced with zeros and indicated via the `future_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`): Required time features for the prediction window, which the model internally will add to `future_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must but known at prediction time. The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`. future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*): Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
This mask is used to filter out missing values for the final loss calculation. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to make sure the model can only look at previous inputs in order to predict the future. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class InformerEncoder(InformerPreTrainedModel): """ Informer encoder consisting of *config.encoder_layers* self attention layers with distillation layers. Each attention layer is an [`InformerEncoderLayer`]. Args: config: InformerConfig """ def __init__(self, config: InformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.gradient_checkpointing = False if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = InformerSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) if config.distil: self.conv_layers = nn.ModuleList( [InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)] ) self.conv_layers.append(None) else: self.conv_layers = [None] * config.encoder_layers # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
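        Example (an illustrative sketch; the encoder is usually driven by [`InformerModel`] rather than
        called directly, and `inputs_embeds` here stands for the already-assembled transformer inputs):

        ```python
        >>> import torch
        >>> config = InformerConfig(context_length=24, prediction_length=12)
        >>> encoder = InformerEncoder(config)
        >>> inputs_embeds = torch.randn(1, 24, config.feature_size)
        >>> outputs = encoder(inputs_embeds=inputs_embeds)
        >>> outputs.last_hidden_state.shape[-1] == config.d_model
        True
        ```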
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size()) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, (encoder_layer, conv_layer) in enumerate(zip(self.layers, self.conv_layers)): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) if conv_layer is not None: output = self._gradient_checkpointing_func(conv_layer, layer_outputs[0]) layer_outputs = (output,) + layer_outputs[1:] else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) if conv_layer is not None: output = conv_layer(layer_outputs[0]) layer_outputs = (output,) + layer_outputs[1:] hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerDecoder with TimeSeriesTransformer->Informer,TimeSeriesTransformerConfig->InformerConfig,time-series-transformer->informer,Transformer->Informer,TimeSeries->Informer class InformerDecoder(InformerPreTrainedModel): """ Informer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`InformerDecoderLayer`] Args: config: InformerConfig """ def __init__(self, config: InformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = InformerSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList([InformerDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Informer Model outputting raw hidden-states without any specific head on top.", INFORMER_START_DOCSTRING, ) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerModel with TimeSeriesTransformer->Informer,TIME_SERIES_TRANSFORMER->INFORMER,time-series-transformer->informer,TimeSeries->Informer class InformerModel(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: self.scaler = InformerMeanScaler(config) elif config.scaling == "std": self.scaler = InformerStdScaler(config) else: self.scaler = InformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = InformerFeatureEmbedder( cardinalities=config.cardinality, embedding_dims=config.embedding_dimension, ) # transformer encoder-decoder and mask initializer self.encoder = InformerEncoder(config) self.decoder = InformerDecoder(config) # Initialize weights and apply final processing self.post_init() @property def _past_length(self) -> int: return self.config.context_length + max(self.config.lags_sequence) def get_lagged_subsequences( self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0 ) -> torch.Tensor: """ Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :]. 
Args: sequence: Tensor The sequence from which lagged subsequences should be extracted. Shape: (N, T, C). subsequences_length : int Length of the subsequences to be extracted. shift: int Shift the lags by this amount back. """ sequence_length = sequence.shape[1] indices = [lag - shift for lag in self.config.lags_sequence] if max(indices) + subsequences_length > sequence_length: raise ValueError( f"lags cannot go further than history length, found lag {max(indices)} " f"while history length is only {sequence_length}" ) lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) return torch.stack(lagged_values, dim=-1) def create_network_inputs( self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, ): # time feature time_feat = ( torch.cat( ( past_time_features[:, self._past_length - self.config.context_length :, ...], future_time_features, ), dim=1, ) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length :, ...] ) # target if past_observed_mask is None: past_observed_mask = torch.ones_like(past_values) context = past_values[:, -self.config.context_length :] observed_context = past_observed_mask[:, -self.config.context_length :] _, loc, scale = self.scaler(context, observed_context) inputs = ( (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale ) # static features log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p() log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log() static_feat = torch.cat((log_abs_loc, log_scale), dim=1) if static_real_features is not None: static_feat = torch.cat((static_real_features, static_feat), dim=1) if static_categorical_features is not None: embedded_cat = self.embedder(static_categorical_features) static_feat = torch.cat((embedded_cat, static_feat), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) # all features features = torch.cat((expanded_static_feat, time_feat), dim=-1) # lagged features subsequences_length = ( self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length ) lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]: raise ValueError( f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match" ) # transformer inputs transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1) return transformer_inputs, loc, scale, static_feat def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, 
past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqTSModelOutput, Tuple]: r""" Returns: Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import InformerModel >>> file = hf_hub_download( ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly") >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> last_hidden_state = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_inputs, loc, scale, static_feat = self.create_network_inputs( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, ) if encoder_outputs is None: enc_input = transformer_inputs[:, : self.config.context_length, ...] encoder_outputs = self.encoder( inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) dec_input = transformer_inputs[:, self.config.context_length :, ...] 
decoder_outputs = self.decoder( inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs + (loc, scale, static_feat) return Seq2SeqTSModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat, ) @add_start_docstrings( "The Informer Model with a distribution head on top for time-series forecasting.", INFORMER_START_DOCSTRING, ) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerForPrediction with TimeSeriesTransformer->Informer,TIME_SERIES_TRANSFORMER->INFORMER,time-series-transformer->informer class InformerForPrediction(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) self.model = InformerModel(config) if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.input_size) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.input_size) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.input_size) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model) self.target_shape = self.distribution_output.event_shape if config.loss == "nll": self.loss = nll else: raise ValueError(f"Unknown loss function {config.loss}") # Initialize weights of distribution_output and apply final processing self.post_init() def output_params(self, dec_output): return self.parameter_projection(dec_output) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @torch.jit.ignore def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution: sliced_params = params if trailing_n is not None: sliced_params = [p[:, -trailing_n:] for p in params] return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale) @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, future_observed_mask: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: 
Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqTSModelOutput, Tuple]: r""" Returns: Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import InformerForPrediction >>> file = hf_hub_download( ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> model = InformerForPrediction.from_pretrained( ... "huggingface/informer-tourism-monthly" ... ) >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> loss = outputs.loss >>> loss.backward() >>> # during inference, one only provides past values >>> # as well as possible additional features >>> # the model autoregressively generates future values >>> outputs = model.generate( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_time_features=batch["future_time_features"], ... ) >>> mean_prediction = outputs.sequences.mean(dim=1) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, ) prediction_loss = None params = None if future_values is not None: params = self.output_params(outputs[0]) # outputs.last_hidden_state # loc is 3rd last and scale is 2nd last output distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:] return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput( loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> SampleTSPredictionOutput: r""" Greedily generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, which serve as context in order to predict the future. The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of the past. The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags). Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`): Required time features, which the model internally will add to `past_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires you to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must be known at prediction time. The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`. 
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`): Required time features for the prediction window, which the model internally will add to sampled predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires you to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must be known at prediction time. The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`. past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): Optional static categorical features for which the model will learn an embedding, which it will add to the values of the time series. Static categorical features are features which have the same value for all time steps (static over time). A typical example of a static categorical feature is a time series ID. static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): Optional static real features which the model will add to the values of the time series. Static real features are features which have the same value for all time steps (static over time). A typical example of a static real feature is promotion information. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Return: [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for multivariate predictions. 
""" outputs = self( static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True, ) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = ( past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc ) / repeated_scale expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1) features = torch.cat((expanded_static_feat, future_time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) future_samples = [] # greedy decoding for k in range(self.config.prediction_length): lagged_sequence = self.model.get_lagged_subsequences( sequence=repeated_past_values, subsequences_length=1 + k, shift=1, ) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1) dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) dec_last_hidden = dec_output.last_hidden_state params = self.parameter_projection(dec_last_hidden[:, -1:]) distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale) next_sample = distr.sample() repeated_past_values = torch.cat( (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1 ) future_samples.append(next_sample) concat_future_samples = torch.cat(future_samples, dim=1) return SampleTSPredictionOutput( sequences=concat_future_samples.reshape( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) )
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/informer/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_informer"] = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
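As a quick illustration of how the lazy-import machinery above is consumed in practice (assuming `transformers` and PyTorch are installed; the class names are exactly those registered in `_import_structure`):

```python
# Illustrative only: the names listed in `_import_structure` resolve through the `_LazyModule`,
# so the torch-heavy `modeling_informer` module is only imported when one of its classes is
# first accessed.
from transformers.models.informer import InformerConfig, InformerModel

config = InformerConfig(prediction_length=12)
model = InformerModel(config)  # randomly initialized weights
print(model.config.model_type)  # "informer"
```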
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/informer/configuration_informer.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Informer model configuration""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "huggingface/informer-tourism-monthly": ( "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" ), # See all Informer models at https://huggingface.co/models?filter=informer } class InformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an Informer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Informer [huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: prediction_length (`int`): The prediction length for the decoder. In other words, the prediction horizon of the model. This value is typically dictated by the dataset and we recommend setting it appropriately. context_length (`int`, *optional*, defaults to `prediction_length`): The context length for the encoder. If `None`, the context length will be the same as the `prediction_length`. distribution_output (`string`, *optional*, defaults to `"student_t"`): The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial". loss (`string`, *optional*, defaults to `"nll"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood (nll) - which currently is the only supported one. input_size (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. scaling (`string` or `bool`, *optional*, defaults to `"mean"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`): The lags of the input time series used as covariates, often dictated by the frequency of the data. Default is `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it based on the dataset. num_time_features (`int`, *optional*, defaults to 0): The number of time features in the input time series. num_dynamic_real_features (`int`, *optional*, defaults to 0): The number of dynamic real valued features. num_static_categorical_features (`int`, *optional*, defaults to 0): The number of static categorical features. 
num_static_real_features (`int`, *optional*, defaults to 0): The number of static real valued features. cardinality (`list[int]`, *optional*): The cardinality (number of different values) for each of the static categorical features. Should be a list of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if `num_static_categorical_features` is > 0. embedding_dimension (`list[int]`, *optional*): The dimension of the embedding for each of the static categorical features. Should be a list of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if `num_static_categorical_features` is > 0. d_model (`int`, *optional*, defaults to 64): Dimensionality of the transformer layers. encoder_layers (`int`, *optional*, defaults to 2): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 2): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 2): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 2): Number of attention heads for each attention layer in the Transformer decoder. encoder_ffn_dim (`int`, *optional*, defaults to 32): Dimension of the "intermediate" (often named feed-forward) layer in the encoder. decoder_ffn_dim (`int`, *optional*, defaults to 32): Dimension of the "intermediate" (often named feed-forward) layer in the decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and `"relu"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the encoder and decoder. encoder_layerdrop (`float`, *optional*, defaults to 0.1): The dropout probability for the attention and fully connected layers for each encoder layer. decoder_layerdrop (`float`, *optional*, defaults to 0.1): The dropout probability for the attention and fully connected layers for each decoder layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout probability used between the two layers of the feed-forward networks. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples to generate in parallel for each time step of inference. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. use_cache (`bool`, *optional*, defaults to `True`): Whether to use the past key/values attentions (if applicable to the model) to speed up decoding. attention_type (`str`, *optional*, defaults to "prob"): Attention mechanism used in the encoder. This can be set to "prob" (Informer's ProbAttention) or "full" (vanilla transformer's canonical self-attention). sampling_factor (`int`, *optional*, defaults to 5): ProbSparse sampling factor (only takes effect when `attention_type`="prob"). It is used to control the reduced query matrix (Q_reduce) input length. distil (`bool`, *optional*, defaults to `True`): Whether to use distilling in the encoder. 
Example: ```python >>> from transformers import InformerConfig, InformerModel >>> # Initializing an Informer configuration with 12 time steps for prediction >>> configuration = InformerConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = InformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "informer" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = None, scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_real_features: int = 0, num_static_categorical_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", dropout: float = 0.05, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, # Informer arguments attention_type: str = "prob", sampling_factor: int = 5, distil: bool = True, **kwargs, ): # time series specific configuration self.prediction_length = prediction_length self.context_length = context_length or prediction_length self.distribution_output = distribution_output self.loss = loss self.input_size = input_size self.num_time_features = num_time_features self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] self.scaling = scaling self.num_dynamic_real_features = num_dynamic_real_features self.num_static_real_features = num_static_real_features self.num_static_categorical_features = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(cardinality) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) self.cardinality = cardinality else: self.cardinality = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(embedding_dimension) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) self.embedding_dimension = embedding_dimension else: self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality] self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features self.d_model = d_model self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.encoder_layerdrop = encoder_layerdrop 
self.decoder_layerdrop = decoder_layerdrop self.activation_function = activation_function self.init_std = init_std self.use_cache = use_cache # Informer self.attention_type = attention_type self.sampling_factor = sampling_factor self.distil = distil super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def _number_of_features(self) -> int: return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
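To make the feature bookkeeping in `_number_of_features` and `feature_size` concrete, here is a small sketch with assumed (illustrative) settings; the arithmetic simply mirrors the property above and the `feature_size` assignment in `__init__`.

```python
from transformers import InformerConfig

# Assumed toy settings, chosen only to illustrate the arithmetic; not defaults of any checkpoint.
config = InformerConfig(
    prediction_length=24,
    input_size=1,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
    num_static_real_features=1,
    num_static_categorical_features=1,
    cardinality=[5],
    embedding_dimension=[3],
)

# _number_of_features = sum(embedding_dimension) + num_dynamic_real_features
#                       + num_time_features + num_static_real_features + input_size * 2
#                     = 3 + 0 + 2 + 1 + 2 = 8
# feature_size        = input_size * len(lags_sequence) + _number_of_features = 7 + 8 = 15
assert config.feature_size == 15
```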
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/luke/modeling_luke.py
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LUKE model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_luke import LukeConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LukeConfig" _CHECKPOINT_FOR_DOC = "studio-ousia/luke-base" LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "studio-ousia/luke-base", "studio-ousia/luke-large", # See all LUKE models at https://huggingface.co/models?filter=luke ] @dataclass class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ Base class for outputs of the LUKE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseLukeModelOutput(BaseModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMaskedLMOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None mlm_loss: Optional[torch.FloatTensor] = None mep_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None entity_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityClassificationOutput(ModelOutput): """ Outputs of entity classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntityPairClassificationOutput(ModelOutput): """ Outputs of entity pair classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class EntitySpanClassificationOutput(ModelOutput): """ Outputs of entity span classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeSequenceClassifierOutput(ModelOutput): """ Outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. 
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeQuestionAnsweringModelOutput(ModelOutput): """ Outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LukeMultipleChoiceModelOutput(ModelOutput): """ Outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class LukeEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
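For example, assuming `padding_idx` is 1 and the embedded sequence has length 4, the generated position ids are `[2, 3, 4, 5]`.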
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LukeEntityEmbeddings(nn.Module): def __init__(self, config: LukeConfig): super().__init__() self.config = config self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0) if config.entity_emb_size != config.hidden_size: self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None ): if token_type_ids is None: token_type_ids = torch.zeros_like(entity_ids) entity_embeddings = self.entity_embeddings(entity_ids) if self.config.entity_emb_size != self.config.hidden_size: entity_embeddings = self.entity_embedding_dense(entity_embeddings) position_embeddings = self.position_embeddings(position_ids.clamp(min=0)) position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1) position_embeddings = position_embeddings * position_embedding_mask position_embeddings = torch.sum(position_embeddings, dim=-2) position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = entity_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class LukeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}."
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.use_entity_aware_attention = config.use_entity_aware_attention self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) if self.use_entity_aware_attention: self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) if entity_hidden_states is None: concat_hidden_states = word_hidden_states else: concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) key_layer = self.transpose_for_scores(self.key(concat_hidden_states)) value_layer = self.transpose_for_scores(self.value(concat_hidden_states)) if self.use_entity_aware_attention and entity_hidden_states is not None: # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e) # query layers w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states)) w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states)) e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states)) e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states)) # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above w2w_key_layer = key_layer[:, :, :word_size, :] e2w_key_layer = key_layer[:, :, :word_size, :] w2e_key_layer = key_layer[:, :, word_size:, :] e2e_key_layer = key_layer[:, :, word_size:, :] # compute attention scores based on the dot product between the query and key vectors w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2)) w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2)) e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2)) e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2)) # combine attention scores to create the final attention score matrix word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3) entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3) attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2) else: query_layer = self.transpose_for_scores(self.query(concat_hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in LukeModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
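# At this point `attention_scores` covers the concatenated [word tokens; entity tokens] sequence on both the query and key axes,
# i.e. it has shape (batch_size, num_heads, word_size + entity_length, word_size + entity_length) when entity hidden states are
# passed, and (batch_size, num_heads, word_size, word_size) otherwise.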
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) output_word_hidden_states = context_layer[:, :word_size, :] if entity_hidden_states is None: output_entity_hidden_states = None else: output_entity_hidden_states = context_layer[:, word_size:, :] if output_attentions: outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs) else: outputs = (output_word_hidden_states, output_entity_hidden_states) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LukeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LukeSelfAttention(config) self.output = LukeSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError("LUKE does not support the pruning of attention heads") def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_outputs = self.self( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions, ) if entity_hidden_states is None: concat_self_outputs = self_outputs[0] concat_hidden_states = word_hidden_states else: concat_self_outputs = torch.cat(self_outputs[:2], dim=1) concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) attention_output = self.output(concat_self_outputs, concat_hidden_states) word_attention_output = attention_output[:, :word_size, :] if entity_hidden_states is None: entity_attention_output = None else: entity_attention_output = attention_output[:, word_size:, :] # add attentions if we output them outputs = (word_attention_output, entity_attention_output) + self_outputs[2:] return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class LukeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class LukeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LukeAttention(config) self.intermediate = LukeIntermediate(config) self.output = LukeOutput(config) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_attention_outputs = self.attention( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) if entity_hidden_states is None: concat_attention_output = self_attention_outputs[0] else: concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1) outputs = self_attention_outputs[2:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output ) word_layer_output = layer_output[:, :word_size, :] if entity_hidden_states is None: entity_layer_output = None else: entity_layer_output = layer_output[:, word_size:, :] outputs = (word_layer_output, entity_layer_output) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LukeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_word_hidden_states = () if output_hidden_states else None all_entity_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module( word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) word_hidden_states = layer_outputs[0] if entity_hidden_states is not None: entity_hidden_states = layer_outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) if not return_dict: return tuple( v for v in [ word_hidden_states, all_word_hidden_states, 
all_self_attentions, entity_hidden_states, all_entity_hidden_states, ] if v is not None ) return BaseLukeModelOutput( last_hidden_state=word_hidden_states, hidden_states=all_word_hidden_states, attentions=all_self_attentions, entity_last_hidden_state=entity_hidden_states, entity_hidden_states=all_entity_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class LukePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class EntityPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.entity_emb_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class EntityPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.transform = EntityPredictionHeadTransform(config) self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class LukePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LukeConfig base_model_prefix = "luke" supports_gradient_checkpointing = True _no_split_modules = ["LukeAttention", "LukeEntityEmbeddings"] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if module.embedding_dim == 1: # embedding for bias parameters module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) LUKE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LukeConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LUKE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
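The entity inputs (`entity_ids`, `entity_attention_mask`, `entity_token_type_ids` and `entity_position_ids`) are typically built together with the word-token inputs by passing `entity_spans` (and optionally `entities`) to the tokenizer; see the usage examples in the model classes below.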
""" @add_start_docstrings( "The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any" " specific head on top.", LUKE_START_DOCSTRING, ) class LukeModel(LukePreTrainedModel): def __init__(self, config: LukeConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = LukeEmbeddings(config) self.entity_embeddings = LukeEntityEmbeddings(config) self.encoder = LukeEncoder(config) self.pooler = LukePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_entity_embeddings(self): return self.entity_embeddings.entity_embeddings def set_entity_embeddings(self, value): self.entity_embeddings.entity_embeddings = value def _prune_heads(self, heads_to_prune): raise NotImplementedError("LUKE does not support the pruning of attention heads") @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseLukeModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeModel >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... 
) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if entity_ids is not None: entity_seq_length = entity_ids.size(1) if entity_attention_mask is None: entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device) if entity_token_type_ids is None: entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # First, compute word embeddings word_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) # Second, compute extended attention mask extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask) # Third, compute entity embeddings and concatenate with word embeddings if entity_ids is None: entity_embedding_output = None else: entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids) # Fourth, send embeddings through the model encoder_outputs = self.encoder( word_embedding_output, entity_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Fifth, get the output. 
# LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size) sequence_output = encoder_outputs[0] # Sixth, we compute the pooled_output, word_sequence_output and entity_sequence_output based on the sequence_output pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseLukeModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, entity_last_hidden_state=encoder_outputs.entity_last_hidden_state, entity_hidden_states=encoder_outputs.entity_hidden_states, ) def get_extended_attention_mask( self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor] ): """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: word_attention_mask (`torch.LongTensor`): Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore. entity_attention_mask (`torch.LongTensor`, *optional*): Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. """ attention_mask = word_attention_mask if entity_attention_mask is not None: attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})") extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min return extended_attention_mask def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: torch.Tensor padding_idx: int Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
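# Hypothetical example, assuming padding_idx=1: input_ids=[[5, 7, 1, 1]] -> mask=[[1, 1, 0, 0]],
# cumsum(mask) * mask=[[1, 2, 0, 0]], result=[[2, 3, 1, 1]], i.e. padding positions keep padding_idx.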
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead class LukeLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction. """, LUKE_START_DOCSTRING, ) class LukeForMaskedLM(LukePreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight"] def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.lm_head = LukeLMHead(config) self.entity_predictions = EntityPredictionHead(config) self.loss_fn = nn.CrossEntropyLoss() # Initialize weights and apply final processing self.post_init() def tie_weights(self): super().tie_weights() self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings) def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, entity_labels: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) loss = None mlm_loss = None logits = self.lm_head(outputs.last_hidden_state) if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1)) if loss is None: loss = mlm_loss mep_loss = None entity_logits = None if outputs.entity_last_hidden_state is not None: entity_logits = self.entity_predictions(outputs.entity_last_hidden_state) if entity_labels is not None: mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1)) if loss is None: loss = mep_loss else: loss = loss + mep_loss if not return_dict: return tuple( v for v in [ loss, mlm_loss, mep_loss, logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMaskedLMOutput( loss=loss, mlm_loss=mlm_loss, mep_loss=mep_loss, logits=logits, entity_logits=entity_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity. """, LUKE_START_DOCSTRING, ) class LukeForEntityClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. 
If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntityClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: person ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = outputs.entity_last_hidden_state[:, 0, :] feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED. 
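The hidden states of the first and second entity tokens are concatenated before classification, which is why the classification head takes an input of size `2 * hidden_size`.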
""", LUKE_START_DOCSTRING, ) class LukeForEntityPairClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityPairClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntityPairClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [ ... (0, 7), ... (17, 28), ... 
] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: per:cities_of_residence ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = torch.cat( [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1 ) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityPairClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition. 
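Each candidate span is represented by the concatenation of its start-token, end-token and entity hidden states, which is why the classification head takes an input of size `3 * hidden_size`.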
""", LUKE_START_DOCSTRING, ) class LukeForEntitySpanClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, entity_start_positions: Optional[torch.LongTensor] = None, entity_end_positions: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntitySpanClassificationOutput]: r""" entity_start_positions (`torch.LongTensor`): The start positions of entities in the word token sequence. entity_end_positions (`torch.LongTensor`): The end positions of entities in the word token sequence. labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> text = "Beyoncé lives in Los Angeles" # List all possible entity spans in the text >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens >>> entity_spans = [] >>> for i, start_pos in enumerate(word_start_positions): ... for end_pos in word_end_positions[i:]: ... entity_spans.append((start_pos, end_pos)) >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist() >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): ... if predicted_class_idx != 0: ... 
print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx]) Beyoncé PER Los Angeles LOC ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) hidden_size = outputs.last_hidden_state.size(-1) entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_start_positions.device != outputs.last_hidden_state.device: entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device) start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions) entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_end_positions.device != outputs.last_hidden_state.device: entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device) end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions) feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # When the number of dimension of `labels` is 2, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. if labels.ndim == 2: loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1)) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntitySpanClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this class. 
""", LUKE_START_DOCSTRING, ) class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeTokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", LUKE_START_DOCSTRING, ) class LukeForQuestionAnswering(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.FloatTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: return tuple( v for v in [ total_loss, start_logits, end_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForMultipleChoice(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None entity_attention_mask = ( entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None ) entity_token_type_ids = ( entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None ) entity_position_ids = ( entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None ) outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not 
None: # move labels to correct device to enable model parallelism labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: return tuple( v for v in [ loss, reshaped_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, )
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/luke/tokenization_luke.py
# coding=utf-8 # Copyright Studio-Ouisa and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for LUKE.""" import itertools import json import os from collections.abc import Mapping from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import numpy as np import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, PaddingStrategy, TensorType, TextInput, TextInputPair, TruncationStrategy, to_py_obj, ) from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging logger = logging.get_logger(__name__) EntitySpan = Tuple[int, int] EntitySpanInput = List[EntitySpan] Entity = str EntityInput = List[Entity] VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "entity_vocab_file": "entity_vocab.json", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/vocab.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/vocab.json", }, "merges_file": { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/merges.txt", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/merges.txt", }, "entity_vocab_file": { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/entity_vocab.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/entity_vocab.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "studio-ousia/luke-base": 512, "studio-ousia/luke-large": 512, } ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. 
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **entity_ids** -- List of entity ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model. - **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when `return_token_type_ids=True` or if *"entity_token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model (when `return_attention_mask=True` or if *"entity_attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **entity_start_positions** -- List of the start positions of entities in the word token sequence (when `task="entity_span_classification"`). - **entity_end_positions** -- List of the end positions of entities in the word token sequence (when `task="entity_span_classification"`). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`) """ @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 
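    For illustration, printable bytes are mapped to themselves, while bytes such as the space character are
    shifted to printable stand-ins:

    ```python
    >>> byte_encoder = bytes_to_unicode()
    >>> byte_encoder[ord("A")]
    'A'
    >>> byte_encoder[ord(" ")]
    'Ġ'
    ```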
""" bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) # Copied from transformers.models.roberta.tokenization_roberta.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class LukeTokenizer(PreTrainedTokenizer): """ Constructs a LUKE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import LukeTokenizer >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. It also creates entity sequences, namely `entity_ids`, `entity_attention_mask`, `entity_token_type_ids`, and `entity_position_ids` to be used by the LUKE model. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. entity_vocab_file (`str`): Path to the entity vocabulary file. task (`str`, *optional*): Task for which you want to prepare sequences. One of `"entity_classification"`, `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity sequence is automatically created based on the given entity span(s). max_entity_length (`int`, *optional*, defaults to 32): The maximum length of `entity_ids`. max_mention_length (`int`, *optional*, defaults to 30): The maximum number of tokens inside an entity span. entity_token_1 (`str`, *optional*, defaults to `<ent>`): The special token used to represent an entity span in a word token sequence. This token is only used when `task` is set to `"entity_classification"` or `"entity_pair_classification"`. entity_token_2 (`str`, *optional*, defaults to `<ent2>`): The special token used to represent an entity span in a word token sequence. This token is only used when `task` is set to `"entity_pair_classification"`. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. 
</Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (LUKE tokenizer detect beginning of words by the preceding space). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, entity_vocab_file, task=None, max_entity_length=32, max_mention_length=30, entity_token_1="<ent>", entity_token_2="<ent2>", entity_unk_token="[UNK]", entity_pad_token="[PAD]", entity_mask_token="[MASK]", entity_mask2_token="[MASK2]", errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") # we add 2 special tokens for downstream tasks # for more information about lstrip and rstrip, see https://github.com/huggingface/transformers/pull/2778 entity_token_1 = ( AddedToken(entity_token_1, lstrip=False, rstrip=False) if isinstance(entity_token_1, str) else entity_token_1 ) entity_token_2 = ( AddedToken(entity_token_2, lstrip=False, rstrip=False) if isinstance(entity_token_2, str) else entity_token_2 ) kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) kwargs["additional_special_tokens"] += [entity_token_1, entity_token_2] with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle: self.entity_vocab = json.load(entity_vocab_handle) for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]: if entity_special_token not in self.entity_vocab: raise ValueError( f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. " f"Probably an incorrect entity vocab file is loaded: {entity_vocab_file}." ) self.entity_unk_token_id = self.entity_vocab[entity_unk_token] self.entity_pad_token_id = self.entity_vocab[entity_pad_token] self.entity_mask_token_id = self.entity_vocab[entity_mask_token] self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token] self.task = task if task is None or task == "entity_span_classification": self.max_entity_length = max_entity_length elif task == "entity_classification": self.max_entity_length = 1 elif task == "entity_pair_classification": self.max_entity_length = 2 else: raise ValueError( f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification'," " 'entity_span_classification'] only." 
) self.max_mention_length = max_mention_length super().__init__( errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, task=task, max_entity_length=32, max_mention_length=30, entity_token_1="<ent>", entity_token_2="<ent2>", entity_unk_token=entity_unk_token, entity_pad_token=entity_pad_token, entity_mask_token=entity_mask_token, entity_mask2_token=entity_mask2_token, **kwargs, ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Luke, RoBERTa->LUKE def vocab_size(self): return len(self.encoder) # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab with Roberta->Luke, RoBERTa->LUKE def get_vocab(self): vocab = dict(self.encoder).copy() vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe with Roberta->Luke, RoBERTa->LUKE def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize with Roberta->Luke, RoBERTa->LUKE def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id with Roberta->Luke, RoBERTa->LUKE def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token with Roberta->Luke, RoBERTa->LUKE def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string with Roberta->Luke, RoBERTa->LUKE def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.build_inputs_with_special_tokens with Roberta->Luke, RoBERTa->LUKE def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a 
sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A LUKE sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask with Roberta->Luke, RoBERTa->LUKE def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences with Roberta->Luke, RoBERTa->LUKE def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. LUKE does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
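
        Example (a small illustration; `31414` and `232` stand in for arbitrary token ids, and the length of the
        returned list matches the `<s> A </s></s> B </s>` layout described above):

        ```python
        >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
        >>> tokenizer.create_token_type_ids_from_sequences([31414], [232])
        [0, 0, 0, 0, 0, 0, 0]
        ```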
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.prepare_for_tokenization with Roberta->Luke, RoBERTa->LUKE def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, List[TextInput]], text_pair: Optional[Union[TextInput, List[TextInput]]] = None, entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None, entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None, entities: Optional[Union[EntityInput, List[EntityInput]]] = None, entities_pair: Optional[Union[EntityInput, List[EntityInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. text_pair (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. entity_spans (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor, the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each sequence must be equal to the length of each sequence of `entities`. entity_spans_pair (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the length of each sequence must be equal to the length of each sequence of `entities_pair`. 
entities (`List[str]`, `List[List[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans`. If you specify `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. entities_pair (`List[str]`, `List[List[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. max_entity_length (`int`, *optional*): The maximum length of `entity_ids`. """ # Input type checking for clearer error is_valid_single_text = isinstance(text, str) is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or (isinstance(text[0], str))) if not (is_valid_single_text or is_valid_batch_text): raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch).") is_valid_single_text_pair = isinstance(text_pair, str) is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and ( len(text_pair) == 0 or isinstance(text_pair[0], str) ) if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair): raise ValueError("text_pair input must be of type `str` (single example) or `List[str]` (batch).") is_batched = bool(isinstance(text, (list, tuple))) if is_batched: batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text if entities is None: batch_entities_or_entities_pairs = None else: batch_entities_or_entities_pairs = ( list(zip(entities, entities_pair)) if entities_pair is not None else entities ) if entity_spans is None: batch_entity_spans_or_entity_spans_pairs = None else: batch_entity_spans_or_entity_spans_pairs = ( list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans ) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs, batch_entities_or_entities_pairs=batch_entities_or_entities_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, entities=entities, 
entities_pair=entities_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput], text_pair: Optional[Union[TextInput]] = None, entity_spans: Optional[EntitySpanInput] = None, entity_spans_pair: Optional[EntitySpanInput] = None, entities: Optional[EntityInput] = None, entities_pair: Optional[EntityInput] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) if is_split_into_words: raise NotImplementedError("is_split_into_words is not supported in this tokenizer.") ( first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans, ) = self._create_input_sequence( text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs, ) # prepare_for_model will create the attention_mask and token_type_ids return self.prepare_for_model( first_ids, pair_ids=second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]], batch_entity_spans_or_entity_spans_pairs: Optional[ Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]] ] = None, batch_entities_or_entities_pairs: Optional[ Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]] ] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) if is_split_into_words: raise NotImplementedError("is_split_into_words is not supported in this tokenizer.") # input_ids is a list of tuples (one for each example in the batch) input_ids = [] entity_ids = [] entity_token_spans = [] for index, text_or_text_pair in enumerate(batch_text_or_text_pairs): if not isinstance(text_or_text_pair, (list, tuple)): text, text_pair = text_or_text_pair, None else: text, text_pair = text_or_text_pair entities, entities_pair = None, None if batch_entities_or_entities_pairs is not None: entities_or_entities_pairs = batch_entities_or_entities_pairs[index] if entities_or_entities_pairs: if isinstance(entities_or_entities_pairs[0], str): entities, entities_pair = entities_or_entities_pairs, None else: entities, entities_pair = entities_or_entities_pairs entity_spans, entity_spans_pair = None, None if batch_entity_spans_or_entity_spans_pairs is not None: entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index] if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance( entity_spans_or_entity_spans_pairs[0], list ): entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs else: entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None ( first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans, ) = self._create_input_sequence( text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs, ) input_ids.append((first_ids, second_ids)) entity_ids.append((first_entity_ids, second_entity_ids)) entity_token_spans.append((first_entity_token_spans, second_entity_token_spans)) batch_outputs = self._batch_prepare_for_model( input_ids, batch_entity_ids_pairs=entity_ids, batch_entity_token_spans_pairs=entity_token_spans, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]): if not isinstance(entity_spans, list): raise ValueError("entity_spans should be given as a list") elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple): raise ValueError( "entity_spans should be given as a list of tuples containing the start and end character indices" ) if entities is not None: if not isinstance(entities, list): raise ValueError("If you specify entities, they should be given as a list") if len(entities) > 0 and not isinstance(entities[0], str): raise ValueError("If you specify entities, they should be given as a list of entity names") if len(entities) != len(entity_spans): raise ValueError("If you specify entities, entities and entity_spans must be the same length") def _create_input_sequence( self, text: Union[TextInput], text_pair: Optional[Union[TextInput]] = None, entities: Optional[EntityInput] = None, entities_pair: Optional[EntityInput] = None, entity_spans: Optional[EntitySpanInput] = None, entity_spans_pair: Optional[EntitySpanInput] = None, **kwargs, ) -> Tuple[list, list, 
list, list, list, list]: def get_input_ids(text): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) def get_input_ids_and_entity_token_spans(text, entity_spans): if entity_spans is None: return get_input_ids(text), None cur = 0 input_ids = [] entity_token_spans = [None] * len(entity_spans) split_char_positions = sorted(frozenset(itertools.chain(*entity_spans))) char_pos2token_pos = {} for split_char_position in split_char_positions: orig_split_char_position = split_char_position if ( split_char_position > 0 and text[split_char_position - 1] == " " ): # whitespace should be prepended to the following token split_char_position -= 1 if cur != split_char_position: input_ids += get_input_ids(text[cur:split_char_position]) cur = split_char_position char_pos2token_pos[orig_split_char_position] = len(input_ids) input_ids += get_input_ids(text[cur:]) entity_token_spans = [ (char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans ] return input_ids, entity_token_spans first_ids, second_ids = None, None first_entity_ids, second_entity_ids = None, None first_entity_token_spans, second_entity_token_spans = None, None if self.task is None: if entity_spans is None: first_ids = get_input_ids(text) else: self._check_entity_input_format(entities, entity_spans) first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) if entities is None: first_entity_ids = [self.entity_mask_token_id] * len(entity_spans) else: first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities] if text_pair is not None: if entity_spans_pair is None: second_ids = get_input_ids(text_pair) else: self._check_entity_input_format(entities_pair, entity_spans_pair) second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans( text_pair, entity_spans_pair ) if entities_pair is None: second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair) else: second_entity_ids = [ self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair ] elif self.task == "entity_classification": if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)): raise ValueError( "Entity spans should be a list containing a single tuple " "containing the start and end character indices of an entity" ) first_entity_ids = [self.entity_mask_token_id] first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) # add special tokens to input ids entity_token_start, entity_token_end = first_entity_token_spans[0] first_ids = ( first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:] ) first_ids = ( first_ids[:entity_token_start] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_start:] ) first_entity_token_spans = [(entity_token_start, entity_token_end + 2)] elif self.task == "entity_pair_classification": if not ( isinstance(entity_spans, list) and len(entity_spans) == 2 and isinstance(entity_spans[0], tuple) and isinstance(entity_spans[1], tuple) ): raise ValueError( "Entity spans should be provided as a list of two tuples, " "each tuple containing the start and end character indices of an entity" ) head_span, tail_span = entity_spans first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id] first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) head_token_span, tail_token_span = 
first_entity_token_spans token_span_with_special_token_ids = [ (head_token_span, self.additional_special_tokens_ids[0]), (tail_token_span, self.additional_special_tokens_ids[1]), ] if head_token_span[0] < tail_token_span[0]: first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2) first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4) token_span_with_special_token_ids = reversed(token_span_with_special_token_ids) else: first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4) first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2) for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids: first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:] first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:] elif self.task == "entity_span_classification": if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)): raise ValueError( "Entity spans should be provided as a list of tuples, " "each tuple containing the start and end character indices of an entity" ) first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) first_entity_ids = [self.entity_mask_token_id] * len(entity_spans) else: raise ValueError(f"Task {self.task} not supported") return ( first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: List[Tuple[List[int], None]], batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]], batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs batch_entity_ids_pairs: list of entity ids or entity ids pairs batch_entity_token_spans_pairs: list of entity spans or entity spans pairs max_entity_length: The maximum length of the entity sequence. 
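
        For orientation, one element of each batch argument might look as follows (a schematic sketch assuming a
        single example with one [MASK] entity span and no pair sequence; `entity_mask_token_id` stands for
        `self.entity_mask_token_id`, and the word ids are shown without special tokens, as produced by
        `_create_input_sequence`):

        ```python
        batch_ids_pairs = [([31414, 232], None)]
        batch_entity_ids_pairs = [([entity_mask_token_id], None)]  # id of the [MASK] entity in the entity vocab
        batch_entity_token_spans_pairs = [([(0, 2)], None)]  # token-level span covering both words
        ```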
""" batch_outputs = {} for input_ids, entity_ids, entity_token_span_pairs in zip( batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs ): first_ids, second_ids = input_ids first_entity_ids, second_entity_ids = entity_ids first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs outputs = self.prepare_for_model( first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, ids: List[int], pair_ids: Optional[List[int]] = None, entity_ids: Optional[List[int]] = None, pair_entity_ids: Optional[List[int]] = None, entity_token_spans: Optional[List[Tuple[int, int]]] = None, pair_entity_token_spans: Optional[List[Tuple[int, int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id, entity id and entity span, or a pair of sequences of inputs ids, entity ids, entity spans so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Args: ids (`List[int]`): Tokenized input ids of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. entity_ids (`List[int]`, *optional*): Entity ids of the first sequence. pair_entity_ids (`List[int]`, *optional*): Entity ids of the second sequence. entity_token_spans (`List[Tuple[int, int]]`, *optional*): Entity spans of the first sequence. 
pair_entity_token_spans (`List[Tuple[int, int]]`, *optional*): Entity spans of the second sequence. max_entity_length (`int`, *optional*): The maximum length of the entity sequence. """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) # Compute lengths pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned word encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length and max_entity_length overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: # truncate words up to max_length ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) entity_token_offset = 1 # 1 * <s> token pair_entity_token_offset = len(ids) + 3 # 1 * <s> token & 2 * <sep> tokens else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) entity_token_offset = 0 pair_entity_token_offset = len(ids) # Build output dictionary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Set max entity length if not max_entity_length: max_entity_length = self.max_entity_length if entity_ids is not None: total_entity_len = 0 num_invalid_entities = 0 valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)] valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)] total_entity_len += len(valid_entity_ids) num_invalid_entities += len(entity_ids) - len(valid_entity_ids) valid_pair_entity_ids, valid_pair_entity_token_spans = None, None if 
pair_entity_ids is not None: valid_pair_entity_ids = [ ent_id for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans) if span[1] <= len(pair_ids) ] valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)] total_entity_len += len(valid_pair_entity_ids) num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids) if num_invalid_entities != 0: logger.warning( f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the" " truncation of input tokens" ) if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length: # truncate entities up to max_entity_length valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences( valid_entity_ids, pair_ids=valid_pair_entity_ids, num_tokens_to_remove=total_entity_len - max_entity_length, truncation_strategy=truncation_strategy, stride=stride, ) valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)] if valid_pair_entity_token_spans is not None: valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)] if return_overflowing_tokens: encoded_inputs["overflowing_entities"] = overflowing_entities encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids encoded_inputs["entity_ids"] = list(final_entity_ids) entity_position_ids = [] entity_start_positions = [] entity_end_positions = [] for token_spans, offset in ( (valid_entity_token_spans, entity_token_offset), (valid_pair_entity_token_spans, pair_entity_token_offset), ): if token_spans is not None: for start, end in token_spans: start += offset end += offset position_ids = list(range(start, end))[: self.max_mention_length] position_ids += [-1] * (self.max_mention_length - end + start) entity_position_ids.append(position_ids) entity_start_positions.append(start) entity_end_positions.append(end - 1) encoded_inputs["entity_position_ids"] = entity_position_ids if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = entity_start_positions encoded_inputs["entity_end_positions"] = entity_end_positions if return_token_type_ids: encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"]) # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def pad( self, encoded_inputs: Union[ BatchEncoding, List[BatchEncoding], Dict[str, EncodedInput], Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]], ], padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, ) -> BatchEncoding: """ Pad a single encoded input or a batch of 
encoded inputs up to a predefined length or to the max sequence length in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`). .. note:: If the `encoded_inputs` passed are a dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of PyTorch tensors, you will lose the specific device of your tensors however. Args: encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`): Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str, List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch DataLoader collate function. Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). max_entity_length (`int`, *optional*): The maximum length of the entity sequence. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. 
""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} # The model's main input name, usually `input_ids`, has be passed for padding if self.model_input_names[0] not in encoded_inputs: raise ValueError( "You should supply an encoding or a list of encodings to this method " f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" ) required_input = encoded_inputs[self.model_input_names[0]] if not required_input: if return_attention_mask: encoded_inputs["attention_mask"] = [] return encoded_inputs # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch first_element = required_input[0] if isinstance(first_element, (list, tuple)): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. index = 0 while len(required_input[index]) == 0: index += 1 if index < len(required_input): first_element = required_input[index][0] # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. if not isinstance(first_element, (int, list, tuple)): if is_tf_tensor(first_element): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_tensor(first_element): return_tensors = "pt" if return_tensors is None else return_tensors elif isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( f"type of {first_element} unknown: {type(first_element)}. " "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( padding=padding, max_length=max_length, verbose=verbose ) if max_entity_length is None: max_entity_length = self.max_entity_length required_input = encoded_inputs[self.model_input_names[0]] if required_input and not isinstance(required_input[0], (list, tuple)): encoded_inputs = self._pad( encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) return BatchEncoding(encoded_inputs, tensor_type=return_tensors) batch_size = len(required_input) if any(len(v) != batch_size for v in encoded_inputs.values()): raise ValueError("Some items in the output dictionary have a different batch size than others.") if padding_strategy == PaddingStrategy.LONGEST: max_length = max(len(inputs) for inputs in required_input) max_entity_length = ( max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0 ) padding_strategy = PaddingStrategy.MAX_LENGTH batch_outputs = {} for i in range(batch_size): inputs = {k: v[i] for k, v in encoded_inputs.items()} outputs = self._pad( inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) return BatchEncoding(batch_outputs, tensor_type=return_tensors) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, max_entity_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. max_entity_length: The maximum length of the entity sequence. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ entities_provided = bool("entity_ids" in encoded_inputs) # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if padding_strategy == PaddingStrategy.LONGEST: max_length = len(encoded_inputs["input_ids"]) if entities_provided: max_entity_length = len(encoded_inputs["entity_ids"]) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of if ( entities_provided and max_entity_length is not None and pad_to_multiple_of is not None and (max_entity_length % pad_to_multiple_of != 0) ): max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and ( len(encoded_inputs["input_ids"]) != max_length or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length) ) # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs: encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"]) if needs_to_be_padded: difference = max_length - len(encoded_inputs["input_ids"]) if entities_provided: entity_difference = max_entity_length - len(encoded_inputs["entity_ids"]) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if entities_provided: encoded_inputs["entity_attention_mask"] = ( encoded_inputs["entity_attention_mask"] + [0] * entity_difference ) if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference if entities_provided: encoded_inputs["entity_token_type_ids"] = ( encoded_inputs["entity_token_type_ids"] + [0] * entity_difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference if entities_provided: encoded_inputs["entity_ids"] = ( encoded_inputs["entity_ids"] + [self.entity_pad_token_id] * entity_difference ) encoded_inputs["entity_position_ids"] = ( encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference ) if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = ( encoded_inputs["entity_start_positions"] + [0] * entity_difference ) encoded_inputs["entity_end_positions"] = ( encoded_inputs["entity_end_positions"] + [0] * entity_difference ) elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if entities_provided: encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[ "entity_attention_mask" ] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"] if entities_provided: encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[ "entity_token_type_ids" ] if "special_tokens_mask" in encoded_inputs: 
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] if entities_provided: encoded_inputs["entity_ids"] = [self.entity_pad_token_id] * entity_difference + encoded_inputs[ "entity_ids" ] encoded_inputs["entity_position_ids"] = [ [-1] * self.max_mention_length ] * entity_difference + encoded_inputs["entity_position_ids"] if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[ "entity_start_positions" ] encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[ "entity_end_positions" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 entity_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["entity_vocab_file"] ) with open(entity_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return vocab_file, merge_file, entity_vocab_file
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LUKE checkpoint.""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size): # Load configuration defined in the metadata file with open(metadata_path) as metadata_file: metadata = json.load(metadata_file) config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"]) # Load in the weights from the checkpoint_path state_dict = torch.load(checkpoint_path, map_location="cpu") # Load the entity vocab file entity_vocab = load_entity_vocab(entity_vocab_path) tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"]) # Add special tokens to the token vocabulary for downstream tasks entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False) entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]}) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}") tokenizer.save_pretrained(pytorch_dump_folder_path) with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f: json.dump(entity_vocab, f) tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path) # Initialize the embeddings of the special tokens word_emb = state_dict["embeddings.word_embeddings.weight"] ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0) ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0) state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: prefix = f"encoder.layer.{layer_index}.attention.self." state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"] entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]] model = LukeModel(config=config).eval() missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"Missing keys {', '.join(missing_keys)}. 
Expected only missing embeddings.position_ids") if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)): raise ValueError( "Unexpected keys" f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}" ) # Check outputs tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification") text = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) span = (39, 42) encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) # Verify word hidden states if model_size == "large": expected_shape = torch.Size((1, 42, 1024)) expected_slice = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base expected_shape = torch.Size((1, 42, 768)) expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify entity hidden states if model_size == "large": expected_shape = torch.Size((1, 1, 1024)) expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]) else: # base expected_shape = torch.Size((1, 1, 768)) expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(pytorch_dump_folder_path)) model.save_pretrained(pytorch_dump_folder_path) def load_entity_vocab(entity_vocab_path): entity_vocab = {} with open(entity_vocab_path, "r", encoding="utf-8") as f: for index, line in enumerate(f): title, _ = line.rstrip().split("\t") entity_vocab[title] = index return entity_vocab if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) args = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
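A hedged sketch of how the converter above is typically driven end to end; every path below is a placeholder, and only the CLI flags defined in the script are used:

```python
# Shell invocation (placeholder paths):
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/luke/pytorch_model.bin \
#       --metadata_path /path/to/luke/metadata.json \
#       --entity_vocab_path /path/to/luke/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke-base \
#       --model_size base

# The dump folder can then be loaded like any other Transformers checkpoint:
from transformers import LukeModel, LukeTokenizer

model = LukeModel.from_pretrained("./converted-luke-base")
tokenizer = LukeTokenizer.from_pretrained("./converted-luke-base")
```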
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/luke/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_luke"] = [ "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/luke/configuration_luke.py
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LUKE configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class LukeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LUKE [studio-ousia/luke-base](https://huggingface.co/studio-ousia/luke-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LukeModel`]. entity_vocab_size (`int`, *optional*, defaults to 500000): Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the `entity_ids` passed when calling [`LukeModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. entity_emb_size (`int`, *optional*, defaults to 256): The number of dimensions of the entity embedding. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LukeModel`]. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_entity_aware_attention (`bool`, *optional*, defaults to `True`): Whether or not the model should use the entity-aware self-attention mechanism proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)](https://arxiv.org/abs/2010.01057). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. Examples: ```python >>> from transformers import LukeConfig, LukeModel >>> # Initializing a LUKE configuration >>> configuration = LukeConfig() >>> # Initializing a model from the configuration >>> model = LukeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "luke" def __init__( self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): """Constructs LukeConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.entity_vocab_size = entity_vocab_size self.hidden_size = hidden_size self.entity_emb_size = entity_emb_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_entity_aware_attention = use_entity_aware_attention self.classifier_dropout = classifier_dropout
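As a complement to the example in the docstring, a sketch of a deliberately small configuration for quick experiments; the sizes are illustrative only and do not correspond to any released checkpoint (when `entity_emb_size` differs from `hidden_size`, the model projects entity embeddings up to `hidden_size` internally):

```python
from transformers import LukeConfig, LukeModel

tiny_config = LukeConfig(
    vocab_size=50267,
    entity_vocab_size=10_000,   # far smaller than the 500k default
    hidden_size=256,
    entity_emb_size=64,         # projected up to hidden_size inside the model
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=512,
    use_entity_aware_attention=True,
)
tiny_model = LukeModel(tiny_config)
print(f"{sum(p.numel() for p in tiny_model.parameters()):,} parameters")
```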
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py
# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import torch from transformers import ( MixtralConfig, MixtralForCausalLM, ) """ Sample usage: ``` python src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py \ --input_dir /path/to/downloaded/mixtral/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import MixtralForCausalLM model = MixtralForCausalLM.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). """ def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, safe_serialization=True): os.makedirs(model_path, exist_ok=True) params = read_json(os.path.join(input_base_path, "params.json")) num_shards = 1 # For some reason this is a string in the params.json sliding_window = int(params["sliding_window"]) if "sliding_window" in params else None n_layers = params["num_hidden_layers"] n_heads = params["num_attention_heads"] n_heads_per_shard = n_heads // num_shards dim = params["hidden_size"] dims_per_head = dim // n_heads base = params.get("rope_theta", 10000.0) max_position_embeddings = 4096 * 8 num_local_experts = params["num_local_experts"] ffn_dim = params["intermediate_size"] vocab_size = params["vocab_size"] if "num_key_value_heads" in params: num_key_value_heads = params["num_key_value_heads"] # for GQA / MQA num_local_key_value_heads = num_key_value_heads // num_shards key_value_dim = dims_per_head * num_local_key_value_heads else: # compatibility with other checkpoints num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim # permute for sliced rotary def permute(w, n_heads=n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Load weights loaded = [ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pt"), map_location="cpu") for i in range(8) ] merged_state_dict = {} for state_dict in loaded: merged_state_dict.update(state_dict) state_dict = {} for layer_i in range(n_layers): # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
state_dict.update( { f"model.layers.{layer_i}.input_layernorm.weight": merged_state_dict[ f"layers.{layer_i}.attention_norm.weight" ].clone(), f"model.layers.{layer_i}.post_attention_layernorm.weight": merged_state_dict[ f"layers.{layer_i}.ffn_norm.weight" ].clone(), } ) state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( merged_state_dict[f"layers.{layer_i}.attention.wq.weight"] .view(n_heads_per_shard, dims_per_head, dim) .reshape(dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( merged_state_dict[f"layers.{layer_i}.attention.wk.weight"] .view(num_local_key_value_heads, dims_per_head, dim) .reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = ( merged_state_dict[f"layers.{layer_i}.attention.wv.weight"] .view(num_local_key_value_heads, dims_per_head, dim) .reshape(key_value_dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = merged_state_dict[ f"layers.{layer_i}.attention.wo.weight" ] w1 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w1"] w2 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w2"] w3 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w3"] experts_w1 = [ w1[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].contiguous().clone() for expert_idx in range(num_local_experts) ] for idx, expert_block in enumerate(experts_w1): expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w1" state_dict[expert_key + ".weight"] = expert_block.clone() experts_w2 = [ w2[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].contiguous().clone() for expert_idx in range(num_local_experts) ] for idx, expert_block in enumerate(experts_w2): expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w2" state_dict[expert_key + ".weight"] = expert_block.T.clone().contiguous() experts_w3 = [ w3[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].contiguous().clone() for expert_idx in range(num_local_experts) ] for idx, expert_block in enumerate(experts_w3): expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w3" state_dict[expert_key + ".weight"] = expert_block.clone() state_dict[f"model.layers.{layer_i}.block_sparse_moe.gate.weight"] = merged_state_dict[ f"layers.{layer_i}.block_sparse_moe.gate.weight" ] state_dict.update( { "model.norm.weight": merged_state_dict["norm.weight"], "model.embed_tokens.weight": merged_state_dict["tok_embeddings.weight"], "lm_head.weight": merged_state_dict["output.weight"], } ) config = MixtralConfig( hidden_size=dim, intermediate_size=ffn_dim, num_attention_heads=params["num_attention_heads"], num_hidden_layers=params["num_hidden_layers"], rms_norm_eps=params["rms_norm_eps"], num_key_value_heads=num_key_value_heads, vocab_size=vocab_size, rope_theta=base, max_position_embeddings=max_position_embeddings, sliding_window=sliding_window, num_local_experts=num_local_experts, ) print("Loading the checkpoint in a Mixtral model.") with torch.device("meta"): model = MixtralForCausalLM(config) # Avoid saving this as part of the config. del model.config._name_or_path model.config.torch_dtype = torch.float16 print("Saving in the Transformers format.") model.load_state_dict(state_dict, strict=True, assign=True) for n, p in model.named_parameters(): assert p.device.type != "meta", f"{n} has not been loaded!" 
model.save_pretrained(model_path, safe_serialization=safe_serialization) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of Mixtral weights, which contains tokenizer.model and model folders", required=True, ) parser.add_argument( "--model_size", choices=["7B"], help="Only '7B' is supported (the released Mixtral-8x7B checkpoint uses 7B-sized experts). For more details on Mixtral, check out the original repo: https://huggingface.co/mistral-ai", default="7B", ) parser.add_argument("--output_dir", help="Location to write HF model", required=True) parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") args = parser.parse_args() write_model( model_path=args.output_dir, input_base_path=args.input_dir, model_size=args.model_size, safe_serialization=args.safe_serialization, ) if __name__ == "__main__": main()
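A standalone sketch, on dummy tensors, of the expert-slicing pattern used by `write_model` above (illustrative sizes; the real checkpoint uses `dim=4096`, `ffn_dim=14336` and 8 experts, as reflected in the Mixtral config defaults):

```python
import torch

dim, ffn_dim, num_local_experts = 16, 32, 4

# Mirroring the script above: all experts of one layer are stored stacked along the first axis.
w1 = torch.randn(num_local_experts * ffn_dim, dim)

experts_w1 = [
    w1[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].contiguous().clone()
    for expert_idx in range(num_local_experts)
]
assert all(block.shape == (ffn_dim, dim) for block in experts_w1)
```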
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mixtral/__init__.py
# Copyright 2023 Mixtral AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_mixtral": ["MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MixtralConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mixtral"] = [ "MixtralForCausalLM", "MixtralModel", "MixtralPreTrainedModel", "MixtralForSequenceClassification", ] if TYPE_CHECKING: from .configuration_mixtral import MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MixtralConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mixtral import ( MixtralForCausalLM, MixtralForSequenceClassification, MixtralModel, MixtralPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mixtral/configuration_mixtral.py
# coding=utf-8 # Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Mixtral model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = { "mistral-ai/Mixtral-8x7B": "https://huggingface.co/mistral-ai/Mixtral-8x7B/resolve/main/config.json", } class MixtralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1. [mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B) [mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MixtralModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention allows sequence of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 1000000.0): The base period of the RoPE embeddings. sliding_window (`int`, *optional*): Sliding window attention window size. If not specified, will default to `4096`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. num_experts_per_tok (`int`, *optional*, defaults to 2): The number of experts to route each token to; can also be interpreted as the `top-k` routing parameter. num_local_experts (`int`, *optional*, defaults to 8): Number of experts per Sparse MLP layer. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary load balancing loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. ```python >>> from transformers import MixtralModel, MixtralConfig >>> # Initializing a Mixtral 7B style configuration >>> configuration = MixtralConfig() >>> # Initializing a model from the Mixtral 7B style configuration >>> model = MixtralModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mixtral" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-5, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1e6, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=8, output_router_logits=False, router_aux_loss_coef=0.001, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
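The `num_key_value_heads` description above mentions building GQA key/value heads by mean-pooling the original heads; below is a toy sketch of that pooling on a dummy projection weight. It assumes the heads are laid out contiguously along the output dimension, which is an assumption of this illustration rather than a documented conversion recipe:

```python
import torch

num_attention_heads, num_key_value_heads, head_dim, hidden_size = 8, 2, 16, 128
group_size = num_attention_heads // num_key_value_heads

# Multi-head k_proj weight: (num_attention_heads * head_dim, hidden_size)
k_proj = torch.randn(num_attention_heads * head_dim, hidden_size)

# Mean-pool each group of heads into a single key head.
k_proj_gqa = (
    k_proj.view(num_key_value_heads, group_size, head_dim, hidden_size)
    .mean(dim=1)
    .reshape(num_key_value_heads * head_dim, hidden_size)
)
print(k_proj_gqa.shape)  # torch.Size([32, 128])
```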
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/mixtral/modeling_mixtral.py
# coding=utf-8 # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Mixtral model.""" import inspect import math import warnings from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_outputs import ( MoeCausalLMOutputWithPast, MoeModelOutputWithPast, SequenceClassifierOutputWithPast, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_1_13 from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from ...utils.import_utils import is_torch_fx_available from .configuration_mixtral import MixtralConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. # It means that the function will not be traced through and simply appear as a node in the graph. if is_torch_fx_available(): if not is_torch_greater_or_equal_than_1_13: import torch.fx _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MixtralConfig" def load_balancing_loss_func(gate_logits: torch.Tensor, num_experts: torch.Tensor = None, top_k=2) -> float: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]): Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of shape [batch_size X sequence_length, num_experts]. num_experts (`int`, *optional*): Number of experts Returns: The auxiliary loss. 
""" if gate_logits is None or not isinstance(gate_logits, tuple): return 0 if isinstance(gate_logits, tuple): compute_device = gate_logits[0].device concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.mean(expert_mask.float(), dim=0) # Compute the average probability of routing to these experts router_prob_per_expert = torch.mean(routing_weights, dim=0) overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) return overall_loss * num_experts # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mixtral class MixtralRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ MixtralRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mixtral class MixtralRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) # Copied from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Mixtral class MixtralAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". """ def __init__(self, config: MixtralConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.attention_dropout = config.attention_dropout if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = MixtralRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. 
If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Mixtral class MixtralFlashAttention2(MixtralAttention): """ Mixtral flash attention module. This module inherits from `MixtralAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ): if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) # overwrite attention_mask with padding_mask attention_mask = kwargs.pop("padding_mask") bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # Because the input can be padded, the absolute sequence length depends on the max position id. rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) use_sliding_windows = ( _flash_supports_window_size and getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window ) if not _flash_supports_window_size: logger.warning_once( "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" " make sure to upgrade flash-attn library." 
) if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 if ( getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window and cache_has_contents ): slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[self.layer_idx][0] past_value = past_key_value[self.layer_idx][1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reashape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_sliding_windows=use_sliding_windows, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, use_sliding_windows=False, ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. 
Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) use_sliding_windows (`bool`, *optional*): Whether to activate sliding window attention. """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens if not use_sliding_windows: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: if not use_sliding_windows: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) return attn_output def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape # On the first iteration we need to properly re-create the padding mask # by slicing it on the proper place if kv_seq_len != attention_mask.shape[-1]: attention_mask_num_tokens = attention_mask.shape[-1] attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( 
batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) # Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mixtral class MixtralSdpaAttention(MixtralAttention): """ Mixtral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MixtralAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from MixtralAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "MixtralModel is using MixtralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. 
if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.attention_dropout if self.training else 0.0, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal=self.is_causal and attention_mask is None and q_len > 1, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value MIXTRAL_ATTENTION_CLASSES = { "eager": MixtralAttention, "flash_attention_2": MixtralFlashAttention2, "sdpa": MixtralSdpaAttention, } class MixtralBLockSparseTop2MLP(nn.Module): def __init__(self, config: MixtralConfig): super().__init__() self.ffn_dim = config.intermediate_size self.hidden_dim = config.hidden_size self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False) self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states) current_hidden_states = self.w2(current_hidden_states) return current_hidden_states class MixtralSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accomodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. 
""" def __init__(self, config): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok # gating self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False) self.experts = nn.ModuleList([MixtralBLockSparseTop2MLP(config) for _ in range(self.num_experts)]) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ """ batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) # router_logits: (batch * sequence_length, n_experts) router_logits = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # we cast back to the input dtype routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be sollicitated expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) # Loop over all available experts in the model and perform the computation on each expert for expert_idx in range(self.num_experts): expert_layer = self.experts[expert_idx] idx, top_x = torch.where(expert_mask[expert_idx]) if top_x.shape[0] == 0: continue # in torch it is faster to index using lists than torch tensors top_x_list = top_x.tolist() idx_list = idx.tolist() # Index the correct hidden states and compute the expert hidden state for # the current expert. We need to make sure to multiply the output hidden # states by `routing_weights` on the corresponding tokens (top-1 and top-2) current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim) current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None] # However `index_add_` only support torch tensors for indexing so we'll use # the `top_x` tensor here. final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) return final_hidden_states, router_logits class MixtralDecoderLayer(nn.Module): def __init__(self, config: MixtralConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MIXTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.block_sparse_moe = MixtralSparseMoeBlock(config) self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" ) """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, router_logits = self.block_sparse_moe(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) if output_router_logits: outputs += (router_logits,) return outputs MIXTRAL_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MixtralConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" @add_start_docstrings( "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Mixtral class MixtralPreTrainedModel(PreTrainedModel): config_class = MixtralConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MixtralDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() MIXTRAL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->MIXTRAL,Mistral->Mixtral class MixtralModel(MixtralPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MixtralDecoderLayer`] Args: config: MixtralConfig """ def __init__(self, config: MixtralConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Ignore copy @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MoeModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and 
decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = 0 if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache: use_legacy_cache = not isinstance(past_key_values, Cache) if use_legacy_cache: past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_usable_length(seq_length) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Mixtral. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) if self._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self._attn_implementation == "sdpa" and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_logits = () if output_router_logits else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, output_router_logits, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, output_router_logits=output_router_logits, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if output_router_logits: all_router_logits += (layer_outputs[-1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] if v is not None ) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits, ) class MixtralForCausalLM(MixtralPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = MixtralModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) # Ignore copy def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, 
return_dict: Optional[bool] = None, ) -> Union[Tuple, MoeCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, MixtralForCausalLM >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss if not return_dict: output = (logits,) + outputs[1:] if output_router_logits: output = (aux_loss,) + output return (loss,) + output if loss is not None else output return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): # Omit tokens covered by past_key_values if past_key_values is not None: if isinstance(past_key_values, Cache): cache_length = past_key_values.get_seq_length() past_length = past_key_values.seen_tokens max_cache_length = past_key_values.get_max_length() else: cache_length = past_length = 
past_key_values[0][0].shape[2] max_cache_length = None # Keep only the unprocessed tokens: # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as # input) if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard # input_ids based on the past_length. elif past_length < input_ids.shape[1]: input_ids = input_ids[:, past_length:] # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. if ( max_cache_length is not None and attention_mask is not None and cache_length + input_ids.shape[1] > max_cache_length ): attention_mask = attention_mask[:, -max_cache_length:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """ The Mixtral Model transformer with a sequence classification head on top (linear layer). [`MixtralForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mixtral, LLAMA->MIXTRAL class MixtralForSequenceClassification(MixtralPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = MixtralModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = 
BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
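# The batched routing logic in MixtralSparseMoeBlock and the auxiliary loss computed by load_balancing_loss_func at
# the top of this file can be hard to follow from the vectorized implementation alone. The block below is a minimal,
# self-contained sketch of top-2 routing and of the load-balancing loss on random data; it is illustrative only, the
# tensor sizes are made up, and it is not part of the original modeling code.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    num_experts, top_k, num_tokens = 8, 2, 5
    router_logits = torch.randn(num_tokens, num_experts)

    # Top-2 routing: softmax over experts, keep the two largest weights per token and renormalize them so they sum
    # to 1, mirroring MixtralSparseMoeBlock.forward.
    routing_weights = F.softmax(router_logits, dim=-1)
    top_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
    top_weights = top_weights / top_weights.sum(dim=-1, keepdim=True)

    # Load-balancing auxiliary loss, computed the same way as load_balancing_loss_func: the fraction of tokens
    # dispatched to each expert times the average router probability of that expert, summed and scaled by the
    # number of experts.
    expert_mask = F.one_hot(selected_experts, num_experts)
    tokens_per_expert = expert_mask.float().mean(dim=0)
    router_prob_per_expert = routing_weights.mean(dim=0)
    aux_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) * num_experts

    print("experts chosen per token:", selected_experts.tolist())
    print("auxiliary load-balancing loss:", float(aux_loss))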
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of
'src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py'.
"""

import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][ "weight" ] = t5x_global_layer_norm if split_mlp_wi: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block # Only for layer 0: t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][ "embedding" ] = t5x_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][ "embedding" ] = t5x_encoder_global_rel_embedding # Assigning t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"] flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm # Decoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"] t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"] t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"] t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"] # Layer Normalization t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"] 
flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm if split_mlp_wi: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0 flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1 else: flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block # Decoder Normalization tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"] flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm # Only for layer 0: t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][ "embedding" ] = t5x_decoder_rel_embedding # Token Embeddings tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"] flax_model.params["shared"]["embedding"] = tx5_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in t5x_model["target"]["decoder"]: flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(flax_dump_folder_path) print("T5X Model was sucessfully converted!") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) args = parser.parse_args() convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/longt5/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _import_structure = { "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_longt5"] = [ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_longt5"] = [ "FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel", ] if TYPE_CHECKING: from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longt5 import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, LongT5PreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longt5 import ( FlaxLongT5ForConditionalGeneration, FlaxLongT5Model, FlaxLongT5PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
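
# Illustrative sketch, not part of the original module: thanks to the `_LazyModule`
# registration above, the public LongT5 classes can be imported straight from the top-level
# `transformers` package, and the torch/flax submodules are only loaded on first use.
# The config values below are example settings, not defaults prescribed by this file.
#
#     from transformers import LongT5Config, LongT5ForConditionalGeneration
#
#     config = LongT5Config(encoder_attention_type="transient-global", local_radius=127)
#     model = LongT5ForConditionalGeneration(config)  # resolves modeling_longt5 lazily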
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/longt5/modeling_longt5.py
# coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LongT5 model.""" import copy import math import warnings from typing import Any, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LongT5Config" _CHECKPOINT_FOR_DOC = "google/long-t5-local-base" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/long-t5-local-base", "google/long-t5-local-large", "google/long-t5-tglobal-base", "google/long-t5-tglobal-large", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: """Pad a tensor so that a sequence length will be a multiple of `block_len`""" pad_len = -x.shape[dim] % block_len # Handle cases when an empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length is not a multiple of `block_len`, it will be padded first with selected `pad_value`. """ # pad tensor to multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: """Concatenate three consecutive blocks for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
""" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: """Makes 3-blocked relative position ids for local attention.""" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius.""" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: """Prepare attention mask to be applied for a local attention.""" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: """Obtain the "fixed block" global id corresponding to each input token. This implementation is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for the whole fixed block, are assigned to the preceding block. Padding tokens from the original sequence are represented by -1. 
""" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: """Create the relative position tensor for local -> global attention.""" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: """Compute individual block aggregates by summing over individual blocks.""" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. 
""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm) # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder 
self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + 
(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = ( self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != "meta" else None ) memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( "...qhd,...khd->...hqk", query_states, key_states ) # 
(batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10 (according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() # Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) 
self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = ( self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != "meta" else None ) memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, 
None, :])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states) if mask is not None: # 
We need to adjust position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" def 
__init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == "local": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == "transient-global": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( "For encoder attention 
mechanism, either `local` or `transient-global` attention type is expected, " f"but got {config.encoder_attention_type}." ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. 
Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = LongT5Config base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["LongT5Block"] @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise 
ValueError( "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. " "See LongT5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache 
is True: assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == "local": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r""" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei 
Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LONGT5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ LONGT5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( 
last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight"] _keys_to_ignore_on_load_unexpected = [r"decoder"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
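# A minimal end-to-end sketch of how the classes defined above fit together. It mirrors the
# `LongT5ForConditionalGeneration` docstring example (same checkpoint name and input text) and is only an
# illustration, not part of the library itself; it is guarded behind `__main__` so importing this module
# never downloads weights or runs a forward pass.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
    model = LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")

    # Local / transient-global attention lets the encoder handle inputs far longer than vanilla T5.
    long_document = 100 * "studies have shown that owning a dog is good for you "
    inputs = tokenizer(long_document, return_tensors="pt")

    with torch.no_grad():
        summary_ids = model.generate(inputs.input_ids, max_new_tokens=64)
    print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))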
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/longt5/modeling_flax_longt5.py
# coding=utf-8 # Copyright 2022 LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax LongT5 model.""" import copy from typing import Any, Callable, List, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/long-t5-local-base" _CONFIG_FOR_DOC = "LongT5Config" remat = nn_partitioning.remat # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids def _pad_to_multiple(x: jnp.ndarray, block_len: int, axis: int, pad_value: int = 0) -> jnp.ndarray: """Pad an array so that a sequence length will be a multiple of `block_len`""" pad_len = -x.shape[axis] % block_len pad = [(0, 0)] * x.ndim pad[axis] = (0, pad_len) x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value) return x def _split_into_blocks(x: jnp.ndarray, block_len: int, axis: int) -> jnp.ndarray: """Split an input array into blocks of a given `block_len` along the given `axis`. If the dimension length is not a multiple of `block_len`, it will be padded first with selected `pad_value`. """ # pad tensor to multiple of block_len if x.shape[axis] % block_len != 0: x = _pad_to_multiple(x, block_len, axis, pad_value=0) num_blocks = x.shape[axis] // block_len output_shape = x.shape[:axis] + (num_blocks, block_len) + x.shape[(axis + 1) :] return x.reshape(output_shape) def _concatenate_3_blocks(x: jnp.ndarray, block_axis: int, sequence_axis: int, pad_value: int = 0) -> jnp.ndarray: """Concatenate three consecutive blocks for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
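    The output has shape [batch_size, num_blocks, 3 * block_len, ...]: every block is concatenated with its left and
    right neighbouring blocks, with `pad_value` blocks used at the two ends of the sequence.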
""" num_blocks = x.shape[block_axis] pad = [(0, 0)] * x.ndim pad[block_axis] = (1, 1) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value) blocks_list: List[np.array] = [] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_axis] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) return jnp.concatenate(blocks_list, axis=sequence_axis) # [batch_size, num_blocks, 3 * block_len, ...] def _make_3block_relative_position_ids(block_len: int) -> jnp.ndarray: """Makes 3-blocked relative position ids for local attention.""" position_ids = jnp.arange(3 * block_len, dtype=jnp.int32) center_position_ids = position_ids[block_len:-block_len] relative_position_ids = position_ids[None, :] - center_position_ids[:, None] # [block_len, 3 * block_len] return relative_position_ids def _mask_local_attention_mask(local_attention_mask: np.ndarray, block_len: int) -> jnp.ndarray: """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius.""" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = jnp.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] return jnp.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: np.ndarray, block_len: int) -> jnp.ndarray: """Prepare attention mask to be applied for a local attention.""" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, axis=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_axis=1, sequence_axis=2) _blocked_attention_mask = _blocked_attention_mask[..., None] _3blocked_attention_mask = _3blocked_attention_mask[..., None, :] # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = jnp.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask[:, None, ...] def _make_global_fixed_block_ids(attention_mask: np.ndarray, global_block_size: int) -> Tuple[jnp.ndarray, np.ndarray]: """Obtain the "fixed block" global id corresponding to each input token. This implementation is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for the whole fixed block, are assigned to the preceding block. Padding tokens from the original sequence are represented by -1. 
""" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: np.ndarray) -> jnp.ndarray: block_ends = (jnp.arange(seq_len) % global_block_size) == global_block_size - 1 true_block_ends = jnp.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1)[..., None] block_ids = jnp.minimum(block_ids, full_blocks - 1) return block_ids fixed_block_mask = jnp.ones_like(attention_mask) / global_block_size fixed_block_mask = jnp.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = jnp.where(attention_mask != 0.0, 1.0, -1000.0) global_block_ids = jnp.maximum( jnp.floor(mask + fixed_block_mask - 1.0), jnp.array(-1.0, dtype=attention_mask.dtype) ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = jnp.repeat(global_block_ids.max(axis=-1)[:, None], repeats=num_globals, axis=1) else: _sequence_block_ids_max = jnp.zeros((batch_size, 0), dtype=global_block_ids.dtype) global_segment_ids = jnp.cumsum(jnp.ones((batch_size, num_globals)), axis=-1) - 1 global_segment_ids = jnp.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids, global_segment_ids def _make_side_relative_position_ids(attention_mask: np.ndarray, global_block_size: int) -> np.ndarray: """Create the relative position tensor for local -> global attention.""" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = jnp.arange(global_seq_len) side_relative_position = global_positions - block_ids[..., None] return side_relative_position def _create_global_aggregates(hidden_states: np.ndarray, block_ids: np.ndarray, global_seq_len: int) -> np.ndarray: """Compute individual block aggregates by summing over individual blocks.""" # (batch..., seq_len, global_seq_len)) one_hot_block_ids = jax.nn.one_hot(block_ids, global_seq_len) return jnp.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerNorm with T5->LongT5 class FlaxLongT5LayerNorm(nn.Module): hidden_size: int dtype: jnp.dtype = jnp.float32 eps: float = 1e-6 weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones def setup(self): self.weight = self.param("weight", self.weight_init, (self.hidden_size,)) def __call__(self, hidden_states): """ Construct a layernorm module in the LongT5 style; No bias and no subtraction of mean. 
""" # layer norm should always be calculated in float32 variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True) hidden_states = hidden_states / jnp.sqrt(variance + self.eps) return self.weight * hidden_states # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5 class FlaxLongT5DenseActDense(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic=True): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseGatedActDense with T5->LongT5 class FlaxLongT5DenseGatedActDense(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi_0 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wi_1 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerFF with T5->LongT5 class FlaxLongT5LayerFF(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.is_gated_act: self.DenseReluDense = FlaxLongT5DenseGatedActDense(self.config, dtype=self.dtype) else: self.DenseReluDense = FlaxLongT5DenseActDense(self.config, dtype=self.dtype) self.layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__(self, hidden_states, deterministic=True): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic) hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention with T5->LongT5 class FlaxLongT5Attention(nn.Module): config: LongT5Config has_relative_attention_bias: bool = False causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def 
setup(self): self.relative_attention_num_buckets = self.config.relative_attention_num_buckets self.relative_attention_max_distance = self.config.relative_attention_max_distance self.d_model = self.config.d_model self.key_value_proj_dim = self.config.d_kv self.n_heads = self.config.num_heads self.dropout = self.config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) self.q = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(q_init_std), dtype=self.dtype, ) self.k = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.v = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.o = nn.Dense( self.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(o_init_std), dtype=self.dtype, ) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0) * num_buckets relative_position = jnp.abs(relative_position) else: relative_position = -jnp.clip(relative_position, a_max=0) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact) ) relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1) relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large) return relative_buckets.astype("i4") def compute_bias(self, query_length, key_length): """Compute binned relative position bias""" context_position = jnp.arange(query_length, dtype="i4")[:, None] memory_position = jnp.arange(key_length, dtype="i4")[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=(not self.causal), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) values = values.transpose((2, 0, 1))[None, :, :, :] return values def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = jax.lax.dynamic_update_slice(cached_key.value, key, indices) value = jax.lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions # that have already been generated and cached, not the remaining zero elements. 
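            # pad_mask has shape (*batch_dims, 1, num_updated_cache_vectors, max_length) and is True exactly for the
            # key positions already written to the cache, i.e. indices < cur_index + num_updated_cache_vectors.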
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def _create_position_bias( self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift ): cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache) key_length = key_states.shape[1] query_length = key_length if cache_is_filled else query_states.shape[1] if self.has_relative_attention_bias: position_bias = self.compute_bias(query_length, key_length) elif attention_mask is not None: position_bias = jnp.zeros_like(attention_mask) else: position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype) # if key and values are already calculated, only the last query position bias should be taken if cache_is_filled: max_decoder_length = self.variables["cache"]["cached_key"].shape[1] position_bias = jax.lax.dynamic_slice( position_bias, (0, 0, causal_attention_mask_shift, 0), (1, self.n_heads, seq_length, max_decoder_length), ) return position_bias def __call__( self, hidden_states, attention_mask=None, key_value_states=None, position_bias=None, use_cache=False, output_attentions=False, deterministic=True, init_cache=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ batch_size, seq_length = hidden_states.shape[:2] # q, k, v projections query_states = self.q(hidden_states) # (batch_size, n_heads, seq_length, dim_per_head) key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states) value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states) # reshape to (batch_size, seq_length, n_heads, head_dim) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # counter-act scaling in dot_product_attention_weights function query_states *= jnp.sqrt(query_states.shape[-1]) # for fast decoding causal attention mask should be shifted causal_attention_mask_shift = ( self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0 ) # create causal attention_mask; attention_mask has to be defined when model is causal if self.causal: causal_attention_mask = make_causal_mask(attention_mask, dtype="bool") # fast decoding for generate requires special attention_mask if self.has_variable("cache", "cached_key"): max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_attention_mask = jax.lax.dynamic_slice( causal_attention_mask, (0, 0, causal_attention_mask_shift, 0), (1, 1, seq_length, max_decoder_length), ) # broadcast causal attention mask & attention mask to fit for merge causal_attention_mask = jnp.broadcast_to( causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:] ) attention_mask = jnp.broadcast_to( jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape ) attention_mask = combine_masks(attention_mask, causal_attention_mask) elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
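        # The cache entries ("cached_key", "cached_value", "cache_index") are initialized to zeros on the first pass
        # (init_cache=True) and updated in place at each decoding step; `_concatenate_to_cache` also combines the
        # attention mask with a padding mask so that queries only attend to key positions already written to the cache.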
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # replace masked positions with -10_000 if attention_mask is not None: mask_value = jnp.finfo(self.dtype).min attention_mask = jax.lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, mask_value).astype(self.dtype), ) if position_bias is None: # compute position bias (only for first layer) position_bias = self._create_position_bias( key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift ) if attention_mask is not None: position_bias = position_bias + attention_mask # create dropout rng dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") # Softmax(QK^T) attn_weights = dot_product_attention_weights( query_states, key_states, bias=position_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, ) # multiply with value states attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) # bring back to (batch_size, seq_length, d_model) attn_output = self._merge_heads(attn_output) # apply output matrix attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs class FlaxLongT5LocalAttention(nn.Module): config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.relative_attention_num_buckets = self.config.relative_attention_num_buckets self.relative_attention_max_distance = self.config.relative_attention_max_distance self.d_model = self.config.d_model self.key_value_proj_dim = self.config.d_kv self.n_heads = self.config.num_heads self.local_radius = self.config.local_radius self.block_len = self.local_radius + 1 self.dropout = self.config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) self.q = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(q_init_std), dtype=self.dtype, ) self.k = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.v = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.o = nn.Dense( self.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(o_init_std), dtype=self.dtype, ) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), ) @staticmethod # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. 
The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0) * num_buckets relative_position = jnp.abs(relative_position) else: relative_position = -jnp.clip(relative_position, a_max=0) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact) ) relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1) relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large) return relative_buckets.astype("i4") def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = jnp.arange(3 * block_length, dtype="i4") context_position = memory_position[block_length:-block_length] relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=True, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) values = values.transpose((2, 0, 1))[None, None, :, :, :] return values def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim) def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if self.has_relative_attention_bias: position_bias = self.compute_bias(block_len) elif attention_mask is not None: position_bias = jnp.zeros_like(attention_mask) else: position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype) return position_bias def __call__( self, hidden_states, attention_mask=None, key_value_states=None, position_bias=None, output_attentions=False, deterministic=True, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] # q, k, v projections query_states = self.q(hidden_states) # (batch_size, n_heads, seq_length, dim_per_head) key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states) value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states) # reshape to (batch_size, seq_length, n_heads, head_dim) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim) query_states = _split_into_blocks(query_states, self.block_len, axis=1) key_states = _split_into_blocks(key_states, self.block_len, axis=1) value_states = _split_into_blocks(value_states, self.block_len, axis=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2) value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2) # counter-act scaling in dot_product_attention_weights function query_states *= jnp.sqrt(query_states.shape[-1]) if attention_mask is not None: attention_mask = _get_local_attention_mask(attention_mask, self.block_len) # replace masked positions with -10_000 attention_mask = jax.lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, -1e10).astype(self.dtype), ) if position_bias is None: # compute position bias (only for first layer) position_bias = self._create_position_bias(self.block_len, attention_mask) if attention_mask is not None: position_bias = position_bias + attention_mask.swapaxes(1, 2) # create dropout rng dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") # Softmax(QK^T) attn_weights = dot_product_attention_weights( query_states, key_states, bias=position_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, ) # multiply with value states attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) # bring back to (batch_size, seq_length, d_model) attn_output = self._merge_heads(attn_output) attn_output = attn_output[:, :seq_length, :] # apply output matrix attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs class FlaxLongT5TransientGlobalAttention(nn.Module): config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.relative_attention_num_buckets = self.config.relative_attention_num_buckets self.relative_attention_max_distance = self.config.relative_attention_max_distance self.d_model = self.config.d_model self.key_value_proj_dim = self.config.d_kv self.n_heads = self.config.num_heads self.local_radius = self.config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = self.config.global_block_size self.dropout = self.config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) self.q = nn.Dense( self.inner_dim, use_bias=False, 
kernel_init=jax.nn.initializers.normal(q_init_std), dtype=self.dtype, ) self.k = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.v = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.o = nn.Dense( self.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(o_init_std), dtype=self.dtype, ) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), ) # Relative attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), ) self.global_input_layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) @staticmethod # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0) * num_buckets relative_position = jnp.abs(relative_position) else: relative_position = -jnp.clip(relative_position, a_max=0) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact) ) relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1) relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large) return relative_buckets.astype("i4") def compute_bias(self, block_length: int): """Compute binned relative position bias""" memory_position = jnp.arange(3 * block_length, dtype="i4") context_position = memory_position[block_length:-block_length] relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=True, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) values = values.transpose((2, 0, 1))[None, None, :, :, :] return values def compute_side_bias(self, attention_mask: np.ndarray, global_segment_ids: np.ndarray) -> np.ndarray: # (batch_size, 1, 1, seq_len, global_seq_len) side_attention_mask = jnp.equal(attention_mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
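# The boolean side mask is turned into an additive bias below: 0.0 where a token may attend
# the corresponding global token, -1e10 (effectively -inf) otherwise.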
attention_side_bias = jax.lax.select( side_attention_mask > 0, jnp.full(side_attention_mask.shape, 0.0).astype(self.dtype), jnp.full(side_attention_mask.shape, -1e10).astype(self.dtype), ) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(attention_mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=True, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, 1, num_heads, seq_len, global_seq_len) side_bias = jnp.transpose(side_bias, (0, 3, 1, 2)) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim) def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if self.has_relative_attention_bias: position_bias = self.compute_bias(block_len) elif attention_mask is not None: position_bias = jnp.zeros_like(attention_mask) else: position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype) return position_bias def __call__( self, hidden_states, attention_mask=None, key_value_states=None, position_bias=None, output_attentions=False, deterministic=True, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( attention_mask if attention_mask is not None else jnp.ones((batch_size, seq_length)), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # q, k, v projections query_states = self.q(hidden_states) # (batch_size, n_heads, seq_length, dim_per_head) key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states) value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states) # reshape to (batch_size, seq_length, n_heads, head_dim) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # Get global/side key/value_states side_key_states = self.k(global_inputs) side_value_states = self.v(global_inputs) # reshape to (batch_size, global_seq_len, n_heads, head_dim) side_key_states = self._split_heads(side_key_states) side_value_states = self._split_heads(side_value_states) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim) query_states = _split_into_blocks(query_states, self.block_len, axis=1) key_states = _split_into_blocks(key_states, self.block_len, axis=1) value_states = _split_into_blocks(value_states, self.block_len, axis=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2) value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2) # Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = jnp.tile(side_key_states[:, None, ...], reps) side_value_states = jnp.tile(side_value_states[:, None, ...], reps) # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = jnp.concatenate((key_states, side_key_states), axis=2) value_states = jnp.concatenate((value_states, side_value_states), axis=2) # counter-act scaling in dot_product_attention_weights function query_states *= jnp.sqrt(query_states.shape[-1]) if attention_mask is not None: local_attention_mask = _get_local_attention_mask(attention_mask, self.block_len) local_attention_mask = jax.lax.select( local_attention_mask > 0, jnp.full(local_attention_mask.shape, 0.0).astype(self.dtype), jnp.full(local_attention_mask.shape, -1e10).astype(self.dtype), ) else: local_attention_mask = None if position_bias is None: # compute position bias (only for first layer) position_bias = self._create_position_bias(self.block_len, attention_mask) if local_attention_mask is not None: position_bias = position_bias + local_attention_mask.swapaxes(1, 2) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if attention_mask is None: attention_mask = jnp.ones((batch_size, seq_length)) 
side_position_bias = self.compute_side_bias(attention_mask, global_segment_ids) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, axis=-2) side_position_bias = jnp.swapaxes(side_position_bias, 1, 2) position_bias = jnp.concatenate((position_bias, side_position_bias), axis=-1) # create dropout rng dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") # Softmax(QK^T) attn_weights = dot_product_attention_weights( query_states, key_states, bias=position_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, ) # multiply with value states attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) # bring back to (batch_size, seq_length, d_model) attn_output = self._merge_heads(attn_output) attn_output = attn_output[:, :seq_length, :] # apply output matrix attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs class FlaxLongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.LocalSelfAttention = FlaxLongT5LocalAttention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype ) self.layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, **kwargs: Any, # to accept init_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class FlaxLongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.TransientGlobalSelfAttention = FlaxLongT5TransientGlobalAttention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype ) self.layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, **kwargs: Any, # to accept init_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerSelfAttention with T5->LongT5 class 
FlaxLongT5LayerSelfAttention(nn.Module): config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.SelfAttention = FlaxLongT5Attention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, causal=self.config.causal, dtype=self.dtype, ) self.layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, init_cache=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCrossAttention with T5->LongT5 class FlaxLongT5LayerCrossAttention(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.EncDecAttention = FlaxLongT5Attention( self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype ) self.layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, attention_mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class FlaxLongT5Block(nn.Module): config: LongT5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.causal = self.config.causal if self.causal: attention_layer = FlaxLongT5LayerSelfAttention elif self.config.encoder_attention_type == "local": attention_layer = FlaxLongT5LayerLocalSelfAttention elif self.config.encoder_attention_type == "transient-global": attention_layer = FlaxLongT5LayerTransientGlobalSelfAttention else: raise ValueError( "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, " f"but got {self.config.encoder_attention_type}." 
) self.layer = ( attention_layer( self.config, has_relative_attention_bias=self.has_relative_attention_bias, name=str(0), dtype=self.dtype, ), ) feed_forward_index = 1 if self.causal: self.layer += (FlaxLongT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),) feed_forward_index += 1 self.layer += (FlaxLongT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Block.__call__ with T5->LongT5 def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, return_dict=True, deterministic=True, init_cache=False, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights do_cross_attention = self.causal and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = cross_attention_outputs[0] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states, deterministic=deterministic) outputs = (hidden_states,) outputs = outputs + attention_outputs # returns hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) return outputs # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCollection with T5->LongT5 class FlaxLongT5LayerCollection(nn.Module): config: LongT5Config has_relative_attention_bias: bool dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxLongT5Block( self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype ) def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, deterministic=True, init_cache=False, ): return self.layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5BlockCollection with T5->LongT5 class FlaxLongT5BlockCollection(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal if self.gradient_checkpointing: FlaxLongT5CheckpointLayer = remat(FlaxLongT5LayerCollection, static_argnums=(6, 7, 8)) self.blocks = [ FlaxLongT5CheckpointLayer( self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] else: self.blocks = [ 
FlaxLongT5LayerCollection( self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] def __call__( self, hidden_states=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, deterministic: bool = True, init_cache: bool = False, ): # Prepare head mask if needed all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.causal) else None position_bias = None encoder_decoder_position_bias = None for i, layer_module in enumerate(self.blocks): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_attention_mask, encoder_decoder_position_bias, output_attentions, deterministic, init_cache, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.causal and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.causal: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Stack with T5->LongT5 class FlaxLongT5Stack(nn.Module): config: LongT5Config embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal self.block = FlaxLongT5BlockCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.final_layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, init_cache: bool = False, ): hidden_states = self.embed_tokens(input_ids) hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, init_cache=init_cache, ) hidden_states = outputs[0] hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) # Add last layer all_hidden_states = None if output_hidden_states: all_hidden_states = outputs.hidden_states all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: if output_hidden_states: return ( hidden_states, all_hidden_states, ) + outputs[2:] return (hidden_states,) + outputs[1:] return 
FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) LONGT5_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. To know more on how to prepare `input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ LONGT5_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For training, `decoder_input_ids` should be provided. encoder_outputs (`tuple(tuple(jnp.ndarray))`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (keys and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ LONGT5_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(tuple(jnp.ndarray))`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" class FlaxLongT5PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LongT5Config base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: LongT5Config, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) decoder_input_ids = jnp.ones_like(input_ids) decoder_attention_mask = jnp.ones_like(input_ids) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: jnp.ndarray = None, decoder_attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if decoder_input_ids is None: raise ValueError( "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed" " here." ) # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # prepare decoder inputs if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. 
Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. """ # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(LONGT5_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=LongT5Config) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=LongT5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxLongT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs LONGT5_START_DOCSTRING = r""" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. 
**Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ @add_start_docstrings( "The bare LONGT5 Model transformer outputting raw hidden-stateswithout any specific head on top.", LONGT5_START_DOCSTRING, ) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Module with T5->LongT5 class FlaxLongT5Module(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.causal = False self.encoder = FlaxLongT5Stack( encoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxLongT5Stack( decoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Model with T5->LongT5 class FlaxLongT5Model(FlaxLongT5PreTrainedModel): module_class = FlaxLongT5Module append_call_sample_docstring(FlaxLongT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) FLAX_LONGT5_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5Model >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="np" ... 
).input_ids >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxLongT5Model, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxLongT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING) # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5ForConditionalGenerationModule with T5->LongT5 class FlaxLongT5ForConditionalGenerationModule(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.model_dim = self.config.d_model self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.causal = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = FlaxLongT5Stack( encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxLongT5Stack( decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) if self.config.tie_word_embeddings: shared_embedding = self.shared.variables["params"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = self.lm_head(sequence_output) if not return_dict: return (lm_logits,) + decoder_outputs[1:] + encoder_outputs return FlaxSeq2SeqLMOutput( logits=lm_logits, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxLongT5ForConditionalGeneration(FlaxLongT5PreTrainedModel): module_class = FlaxLongT5ForConditionalGenerationModule @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=LongT5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") >>> text = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxLongT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() decoder_outputs = decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.config.d_model**-0.5) if self.config.tie_word_embeddings: shared_embedding = module.shared.variables["params"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = module.lm_head(sequence_output) return lm_logits, decoder_outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
# Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: extended_attention_mask = jax.lax.dynamic_update_slice( extended_attention_mask, decoder_attention_mask, (0, 0) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values return model_kwargs FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"]).sequences >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` """ overwrite_call_docstring( FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC )
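# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal greedy decoding loop exercising the `encode`/`decode` split implemented above.
# The checkpoint and tokenizer names are taken from the docstrings in this file; the loop
# itself, the 20-step budget and the re-decoding of the full prefix (no cache reuse) are
# simplifying assumptions for illustration only.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")

inputs = tokenizer("summarize: My friends are cool but they eat too many carbs.", return_tensors="np")
encoder_outputs = model.encode(**inputs)

# start from the decoder start token, as in the `decode` docstring above
decoder_input_ids = jnp.ones((1, 1), dtype="i4") * model.config.decoder_start_token_id
for _ in range(20):
    outputs = model.decode(decoder_input_ids, encoder_outputs, encoder_attention_mask=inputs.attention_mask)
    next_token = jnp.argmax(outputs.logits[:, -1, :], axis=-1)
    decoder_input_ids = jnp.concatenate([decoder_input_ids, next_token[:, None]], axis=-1)
    if next_token[0] == model.config.eos_token_id:
        break

print(tokenizer.decode(decoder_input_ids[0].tolist(), skip_special_tokens=True))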
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/longt5/configuration_longt5.py
# coding=utf-8 # Copyright 2022, The LongT5 Authors and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LongT5 model configuration""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeq2SeqConfigWithPast from ...utils import logging logger = logging.get_logger(__name__) LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/long-t5-local-base": "https://huggingface.co/google/long-t5-local-base/blob/main/config.json", "google/long-t5-local-large": "https://huggingface.co/google/long-t5-local-large/blob/main/config.json", "google/long-t5-tglobal-base": "https://huggingface.co/google/long-t5-tglobal-base/blob/main/config.json", "google/long-t5-tglobal-large": "https://huggingface.co/google/long-t5-tglobal-large/blob/main/config.json", } class LongT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is used to instantiate a LongT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5 [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 32128): Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LongT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `LongT5Block`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. local_radius (`int`, *optional*, defaults to 127): Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism. global_block_size (`int`, *optional*, defaults to 16): Length of blocks an input sequence is divided into for a global token representation. Used only for `encoder_attention_type = "transient-global"`. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"relu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`. encoder_attention_type (`string`, *optional*, defaults to `"local"`): Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are supported by LongT5 implementation. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "longt5" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, local_radius=127, global_block_size=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, encoder_attention_type="local", use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers # default = symmetry self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.local_radius = local_radius self.global_block_size = global_block_size self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.encoder_attention_type = encoder_attention_type self.use_cache = use_cache act_info = self.feed_forward_proj.split("-") self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == "gated" if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": self.dense_act_fn = "gelu_new" super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, ) class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence" common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") return common_inputs @property def default_onnx_opset(self) -> int: return 13
0
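# --- Editor's illustrative sketch (not part of the original file) ---
# Refers to the `LongT5Config` class defined in the file above: it shows how the constructor
# parses `feed_forward_proj` into `is_gated_act`/`dense_act_fn` (including the "gated-gelu" ->
# "gelu_new" backwards-compatibility rule) and how the decoder depth defaults to the encoder
# depth. The chosen argument values are arbitrary examples.
from transformers import LongT5Config

config = LongT5Config(
    feed_forward_proj="gated-gelu",
    encoder_attention_type="transient-global",  # selects the transient-global encoder attention
    global_block_size=16,
    local_radius=127,
)

assert config.is_gated_act is True
assert config.dense_act_fn == "gelu_new"
assert config.num_decoder_layers == config.num_layers  # symmetric depth by default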
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/vivit/image_processing_vivit.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Vivit.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def make_batched(videos) -> List[List[ImageInput]]: if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(videos): return [[videos]] raise ValueError(f"Could not make batched video from {videos}") class VivitImageProcessor(BaseImageProcessor): r""" Constructs a Vivit image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/127.5`): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. offset (`bool`, *optional*, defaults to `True`): Whether to scale the image in both negative and positive directions. Can be overriden by the `offset` in the `preprocess` method. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 127.5, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 256} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.offset = offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size: output_size = get_resize_output_image_size( image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) elif "height" in size and "width" in size: output_size = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor.rescale def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is 1/127.5, the image is rescaled between [-1, 1]. image = image * scale - 1 If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1]. image = image * scale Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ rescaled_image = rescale( image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs ) if offset: rescaled_image = rescaled_image - 1 return rescaled_image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True.") # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, offset=offset, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess( self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: videos (`ImageInput`): Video frames to preprocess. Expects a single or batch of video frames with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): Whether to centre crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the centre crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between `[-1 - 1]` if `offset` is `True`, `[0, 1]` otherwise. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. offset (`bool`, *optional*, defaults to `self.offset`): Whether to scale the image in both negative and positive directions. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor offset = offset if offset is not None else self.offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") if not valid_images(videos): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) videos = make_batched(videos) videos = [ [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] for video in videos ] data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors)
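# --- Editor's illustrative sketch (not part of the original file) ---
# Runs the processor defined above on a synthetic 32-frame video. With the defaults, frames
# are resized (shortest edge 256), center-cropped to 224x224, rescaled by 1/127.5 with
# `offset=True` (values roughly in [-1, 1]) and then normalized with mean/std 0.5.
# The frame count and input resolution are arbitrary assumptions.
import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor()
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(32)]

batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 32, 3, 224, 224)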
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/vivit/configuration_vivit.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ViViT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class VivitConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViViT [google/vivit-b-16x2-kinetics400](https://huggingface.co/google/vivit-b-16x2-kinetics400) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. num_frames (`int`, *optional*, defaults to 32): The number of frames in each video. tubelet_size (`List[int]`, *optional*, defaults to `[2, 16, 16]`): The size (resolution) of each tubelet. num_channels (`int`, *optional*, defaults to 3): The number of input channels. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_fast"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_fast"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. 
Example: ```python >>> from transformers import VivitConfig, VivitModel >>> # Initializing a ViViT google/vivit-b-16x2-kinetics400 style configuration >>> configuration = VivitConfig() >>> # Initializing a model (with random weights) from the google/vivit-b-16x2-kinetics400 style configuration >>> model = VivitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vivit" def __init__( self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs, ): self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.qkv_bias = qkv_bias super().__init__(**kwargs)
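# --- Editor's illustrative sketch (not part of the original file) ---
# Shows how the default configuration above translates into the Transformer sequence length:
# 2x16x16 tubelets over a 32-frame 224x224 video give (32 // 2) * (224 // 16) * (224 // 16)
# = 3136 patch tokens, plus one CLS token. This mirrors the patch count computed by
# `VivitTubeletEmbeddings` in modeling_vivit.py.
from transformers import VivitConfig

config = VivitConfig()
t, h, w = config.tubelet_size
num_patches = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
print(num_patches + 1)  # 3137, matching the `[1, 3137, 768]` last_hidden_state shape in the model docstring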
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/vivit/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_vivit"] = ["VivitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vivit"] = [ "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
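# --- Editor's usage note (not part of the original file) ---
# Because of the `_LazyModule` indirection above, importing the package is cheap: submodules
# are only loaded when their symbols are first accessed, and the torch-/vision-dependent
# classes stay behind the `OptionalDependencyNotAvailable` guards.
from transformers.models.vivit import VivitConfig  # resolved lazily; no torch or PIL import needed yet

config = VivitConfig()
print(config.model_type)  # "vivit"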
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Flax ViViT checkpoints from the original repository to PyTorch. URL: https://github.com/google-research/scenic/tree/main/scenic/projects/vivit """ import argparse import json import os.path from collections import OrderedDict import numpy as np import requests import torch from flax.training.checkpoints import restore_checkpoint from huggingface_hub import hf_hub_download from transformers import VivitConfig, VivitForVideoClassification, VivitImageProcessor from transformers.image_utils import PILImageResampling def download_checkpoint(path): url = "https://storage.googleapis.com/scenic-bucket/vivit/kinetics_400/vivit_base_16x2_unfactorized/checkpoint" with open(path, "wb") as f: with requests.get(url, stream=True) as req: for chunk in req.iter_content(chunk_size=2048): f.write(chunk) def get_vivit_config() -> VivitConfig: config = VivitConfig() config.num_labels = 400 repo_id = "huggingface/label-files" filename = "kinetics400-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # We will verify our results on a video of eating spaghetti # Frame indices used: [ 47, 51, 55, 59, 63, 67, 71, 75, 80, 84, 88, 92, 96, 100, 104, 108, 113, 117, # 121, 125, 129, 133, 137, 141, 146, 150, 154, 158, 162, 166, 170, 174] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) def transform_attention(current: np.ndarray): if np.ndim(current) == 2: return transform_attention_bias(current) elif np.ndim(current) == 3: return transform_attention_kernel(current) else: raise Exception(f"Invalid number of dimesions: {np.ndim(current)}") def transform_attention_bias(current: np.ndarray): return current.flatten() def transform_attention_kernel(current: np.ndarray): return np.reshape(current, (current.shape[0], current.shape[1] * current.shape[2])).T def transform_attention_output_weight(current: np.ndarray): return np.reshape(current, (current.shape[0] * current.shape[1], current.shape[2])).T def transform_state_encoder_block(state_dict, i): state = state_dict["optimizer"]["target"]["Transformer"][f"encoderblock_{i}"] prefix = f"encoder.layer.{i}." 
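    # Editor's note (added comment): in the Flax checkpoint, dense kernels are stored as
    # (in_features, out_features) and the attention projections are kept split per head, so the
    # `transform_*` helpers used in the mapping below transpose the MLP kernels and re-flatten the
    # per-head query/key/value/out kernels into the (out_features, in_features) layout of the
    # corresponding PyTorch `nn.Linear` weights.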
new_state = { prefix + "intermediate.dense.bias": state["MlpBlock_0"]["Dense_0"]["bias"], prefix + "intermediate.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_0"]["kernel"]), prefix + "output.dense.bias": state["MlpBlock_0"]["Dense_1"]["bias"], prefix + "output.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_1"]["kernel"]), prefix + "layernorm_before.bias": state["LayerNorm_0"]["bias"], prefix + "layernorm_before.weight": state["LayerNorm_0"]["scale"], prefix + "layernorm_after.bias": state["LayerNorm_1"]["bias"], prefix + "layernorm_after.weight": state["LayerNorm_1"]["scale"], prefix + "attention.attention.query.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["bias"] ), prefix + "attention.attention.query.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["kernel"] ), prefix + "attention.attention.key.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["key"]["bias"] ), prefix + "attention.attention.key.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["key"]["kernel"] ), prefix + "attention.attention.value.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["bias"] ), prefix + "attention.attention.value.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["kernel"] ), prefix + "attention.output.dense.bias": state["MultiHeadDotProductAttention_0"]["out"]["bias"], prefix + "attention.output.dense.weight": transform_attention_output_weight( state["MultiHeadDotProductAttention_0"]["out"]["kernel"] ), } return new_state def get_n_layers(state_dict): return sum([1 if "encoderblock_" in k else 0 for k in state_dict["optimizer"]["target"]["Transformer"].keys()]) def transform_state(state_dict, classification_head=False): transformer_layers = get_n_layers(state_dict) new_state = OrderedDict() new_state["layernorm.bias"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["bias"] new_state["layernorm.weight"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["scale"] new_state["embeddings.patch_embeddings.projection.weight"] = np.transpose( state_dict["optimizer"]["target"]["embedding"]["kernel"], (4, 3, 0, 1, 2) ) new_state["embeddings.patch_embeddings.projection.bias"] = state_dict["optimizer"]["target"]["embedding"]["bias"] new_state["embeddings.cls_token"] = state_dict["optimizer"]["target"]["cls"] new_state["embeddings.position_embeddings"] = state_dict["optimizer"]["target"]["Transformer"]["posembed_input"][ "pos_embedding" ] for i in range(transformer_layers): new_state.update(transform_state_encoder_block(state_dict, i)) if classification_head: new_state = {"vivit." 
+ k: v for k, v in new_state.items()} new_state["classifier.weight"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["kernel"]) new_state["classifier.bias"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["bias"]) return {k: torch.tensor(v) for k, v in new_state.items()} # checks that image processor settings are the same as in the original implementation # original: https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/data/video_tfrecord_dataset.py # dataset specific config: # https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/configs/kinetics400/vivit_base_k400.py def get_processor() -> VivitImageProcessor: extractor = VivitImageProcessor() assert extractor.do_resize is True assert extractor.size == {"shortest_edge": 256} assert extractor.do_center_crop is True assert extractor.crop_size == {"width": 224, "height": 224} assert extractor.resample == PILImageResampling.BILINEAR # here: https://github.com/deepmind/dmvr/blob/master/dmvr/modalities.py # one can seen that add_image has default values for normalization_mean and normalization_std set to 0 and 1 # which effectively means no normalization (and ViViT does not overwrite those when calling this func) assert extractor.do_normalize is False assert extractor.do_rescale is True assert extractor.rescale_factor == 1 / 255 # zero-centering = True in original implementation assert extractor.do_zero_centering is True return extractor def convert(output_path: str): flax_model_path = "checkpoint" if not os.path.exists(flax_model_path): download_checkpoint(flax_model_path) state_dict = restore_checkpoint(flax_model_path, None) new_state = transform_state(state_dict, classification_head=True) config = get_vivit_config() assert config.image_size == 224 assert config.num_frames == 32 model = VivitForVideoClassification(config) model.load_state_dict(new_state) model.eval() extractor = get_processor() video = prepare_video() inputs = extractor(video, return_tensors="pt") outputs = model(**inputs) expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-1.0543, 2.0764, -0.2104, 0.4439, -0.9658]) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4), outputs.logits[0, :5] model.save_pretrained(output_path) extractor.save_pretrained(output_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--output_model_name", "-o", type=str, help="Output path for the converted HuggingFace model") args = parser.parse_args() convert(args.output_model_name)
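# --- Editor's usage note (not part of the original file) ---
# The script is expected to be run from the command line; the output directory below is an
# arbitrary example name:
#
#   python convert_vivit_flax_to_pytorch.py --output_model_name ./vivit-b-16x2-kinetics400
#
# The converted checkpoint can then be reloaded with the standard save/load API:
#
#   from transformers import VivitForVideoClassification, VivitImageProcessor
#   model = VivitForVideoClassification.from_pretrained("./vivit-b-16x2-kinetics400")
#   processor = VivitImageProcessor.from_pretrained("./vivit-b-16x2-kinetics400")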
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/vivit/modeling_vivit.py
# coding=utf-8 # Copyright 2023 Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViViT model.""" import math from typing import Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_vivit import VivitConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/vivit-b-16x2-kinetics400" _CONFIG_FOR_DOC = "VivitConfig" VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/vivit-b-16x2-kinetics400", # See all Vivit models at https://huggingface.co/models?filter=vivit ] class VivitTubeletEmbeddings(nn.Module): """ Construct Vivit Tubelet embeddings. This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder. The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) * (width // tubelet_size[2]). """ def __init__(self, config): super().__init__() self.num_frames = config.num_frames self.image_size = config.image_size self.patch_size = config.tubelet_size self.num_patches = ( (self.image_size // self.patch_size[2]) * (self.image_size // self.patch_size[1]) * (self.num_frames // self.patch_size[0]) ) self.embed_dim = config.hidden_size self.projection = nn.Conv3d( config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size ) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape if height != self.image_size or width != self.image_size: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." ) # permute to (batch_size, num_channels, num_frames, height, width) pixel_values = pixel_values.permute(0, 2, 1, 3, 4) x = self.projection(pixel_values) # out_batch_size, out_num_channels, out_num_frames, out_height, out_width = x.shape x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class VivitEmbeddings(nn.Module): """ Vivit Embeddings. Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings. 
""" def __init__(self, config): super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = VivitTubeletEmbeddings(config) self.position_embeddings = nn.Parameter( torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size) ) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, pixel_values): batch_size = pixel_values.shape[0] embeddings = self.patch_embeddings(pixel_values) cls_tokens = self.cls_token.tile([batch_size, 1, 1]) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Vivit class VivitSelfAttention(nn.Module): def __init__(self, config: VivitConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vivit class VivitSelfOutput(nn.Module): """ The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: VivitConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Vivit class VivitAttention(nn.Module): def __init__(self, config: VivitConfig) -> None: super().__init__() self.attention = VivitSelfAttention(config) self.output = VivitSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class VivitIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class VivitOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = 
self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class VivitLayer(nn.Module): """This corresponds to the EncoderBlock class in the scenic/vivit implementation.""" def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VivitAttention(config) self.intermediate = VivitIntermediate(config) self.output = VivitOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, head_mask=None, output_attentions=False): self_attention_outputs = self.attention( # in Vivit, layernorm is applied before self-attention self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] # add self attentions if we output attention weights outputs = self_attention_outputs[1:] # first residual connection hidden_states = attention_output + hidden_states # in Vivit, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class VivitEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class VivitPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class VivitPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = VivitConfig base_model_prefix = "vivit" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv3d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Parameter): module.data.normal_(mean=0.0, std=self.config.initializer_range) VIVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VivitConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VIVIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See [`VivitImageProcessor.preprocess`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.", VIVIT_START_DOCSTRING, ) class VivitModel(VivitPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = VivitEmbeddings(config) self.encoder = VivitEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = VivitPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
Args: heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> import av >>> import numpy as np >>> from transformers import VivitImageProcessor, VivitModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... 
) >>> container = av.open(file_path) >>> # sample 32 frames >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container=container, indices=indices) >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400") >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400") >>> # prepare video for the model >>> inputs = image_processor(list(video), return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 3137, 768] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for Kinetics-400.""", VIVIT_START_DOCSTRING, ) class VivitForVideoClassification(VivitPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.vivit = VivitModel(config, add_pooling_layer=False) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> import av >>> import numpy as np >>> import torch >>> from transformers import VivitImageProcessor, VivitForVideoClassification >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... 
Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 32 frames >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container=container, indices=indices) >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400") >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400") >>> inputs = image_processor(list(video), return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... logits = outputs.logits >>> # model predicts one of the 400 Kinetics-400 classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) LABEL_116 ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vivit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
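# Illustrative sketch (not part of the original module): VivitLayer above uses a pre-norm
# residual layout -- layernorm is applied *before* self-attention and before the MLP, and
# each sub-block's output is added back onto its input. The toy code below only mirrors that
# wiring with stand-in PyTorch modules (nn.MultiheadAttention in place of VivitAttention, a
# plain two-layer MLP in place of VivitIntermediate/VivitOutput); all sizes are arbitrary toy
# values and no real checkpoint is assumed.
import torch
from torch import nn

hidden_size, num_heads = 8, 2
layernorm_before = nn.LayerNorm(hidden_size)
layernorm_after = nn.LayerNorm(hidden_size)
attention = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
mlp = nn.Sequential(nn.Linear(hidden_size, 4 * hidden_size), nn.GELU(), nn.Linear(4 * hidden_size, hidden_size))

hidden_states = torch.randn(2, 5, hidden_size)  # (batch_size, seq_len, hidden_size)

normed = layernorm_before(hidden_states)              # layernorm applied before self-attention
attention_output, _ = attention(normed, normed, normed)
hidden_states = attention_output + hidden_states      # first residual connection

layer_output = layernorm_after(hidden_states)         # layernorm applied before the MLP
layer_output = mlp(layer_output) + hidden_states      # second residual connection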
0
hf_public_repos/transformers/src/transformers/models
hf_public_repos/transformers/src/transformers/models/efficientformer/image_processing_efficientformer.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for EfficientFormer.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class EfficientFormerImageProcessor(BaseImageProcessor): r""" Constructs an EfficientFormer image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.do_center_crop = do_center_crop self.crop_size = crop_size self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "shortest_edge" in size: size = get_resize_output_image_size( image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. 
Got {size.keys()}") return resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs ) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) if not is_batched(images): images = [images] if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
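# Illustrative usage sketch (not part of the original module): a minimal end-to-end run of the
# preprocessing pipeline defined above on a random uint8 image, using the defaults from
# __init__ (resize to 224x224, center crop to 224x224, rescale by 1/255, ImageNet
# normalization). The printed shape is an expectation that follows from those defaults.
import numpy as np

processor = EfficientFormerImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # (height, width, channels)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224), channels-first

# If the input pixel values are already scaled to [0, 1], pass do_rescale=False so they are
# not divided by 255 a second time -- this is the situation the warning in `preprocess` flags.
already_scaled = image.astype(np.float32) / 255.0
batch = processor(images=already_scaled, do_rescale=False, return_tensors="np")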
0
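# Illustrative sketch (an assumed usage pattern, not taken from the file above): the `resize`
# method also accepts a size dict of the form {"shortest_edge": ...}, in which case the shorter
# side is scaled to that value with the aspect ratio preserved, and the center crop then
# produces the final square output.
import numpy as np

processor = EfficientFormerImageProcessor(
    size={"shortest_edge": 256},
    do_center_crop=True,
    crop_size={"height": 224, "width": 224},
)
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)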