# coding=utf-8 # Copyright 2022 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax RoBERTa-PreLayerNorm model.""" from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPooling, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40" _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig" remat = nn_partitioning.remat # Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: jnp.ndarray padding_idx: int Returns: jnp.ndarray """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = (input_ids != padding_idx).astype("i4") if mask.ndim > 2: mask = mask.reshape((-1, mask.shape[-1])) incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask incremental_indices = incremental_indices.reshape(input_ids.shape) else: incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask return incremental_indices.astype("i4") + padding_idx ROBERTA_PRELAYERNORM_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. """ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`numpy.ndarray` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormSelfAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. 
This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxRobertaPreLayerNormSelfOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + input_tensor return hidden_states class FlaxRobertaPreLayerNormAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxRobertaPreLayerNormSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormSelfOutput(self.config, dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): hidden_states_pre_layer_norm = self.LayerNorm(hidden_states) # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states_pre_layer_norm, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxRobertaPreLayerNormIntermediate(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = 
nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxRobertaPreLayerNormOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + attention_output return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayer(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxRobertaPreLayerNormAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxRobertaPreLayerNormIntermediate(self.config, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxRobertaPreLayerNormAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayerCollection(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxRobertaPreLayerNormCheckpointLayer = remat(FlaxRobertaPreLayerNormLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxRobertaPreLayerNormCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxRobertaPreLayerNormLayer(self.config, name=str(i), 
dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEncoder(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxRobertaPreLayerNormLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormPooler(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaLMHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLMHead(nn.Module): 
config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense( self.config.vocab_size, dtype=self.dtype, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN["gelu"](hidden_states) hidden_states = self.layer_norm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaClassificationHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormClassificationHead(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = nn.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaPreTrainedModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm" module_class: nn.Module = None def __init__( self, config: RobertaPreLayerNormConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.ones_like(input_ids) position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxRobertaPreLayerNormAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs class FlaxRobertaPreLayerNormModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxRobertaPreLayerNormEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRobertaPreLayerNormEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxRobertaPreLayerNormPooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.LayerNorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is 
None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaModel with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormModel(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormModule append_call_sample_docstring( FlaxRobertaPreLayerNormModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForMaskedLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMaskedLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMaskedLMModule append_call_sample_docstring( FlaxRobertaPreLayerNormForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, mask="<mask>", ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassificationModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForSequenceClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.classifier = 
FlaxRobertaPreLayerNormClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForSequenceClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForSequenceClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForMultipleChoiceModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a 
softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMultipleChoice with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMultipleChoice(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMultipleChoiceModule overwrite_call_docstring( FlaxRobertaPreLayerNormForMultipleChoice, ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"), ) append_call_sample_docstring( FlaxRobertaPreLayerNormForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForTokenClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForTokenClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForTokenClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForTokenClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForQuestionAnsweringModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForQuestionAnswering with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForQuestionAnswering(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForQuestionAnsweringModule append_call_sample_docstring( FlaxRobertaPreLayerNormForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForCausalLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for autoregressive tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForCausalLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. 
# Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( FlaxRobertaPreLayerNormForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, )
transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py/0
{ "file_path": "transformers/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py", "repo_id": "transformers", "token_count": 26553 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio/Text processor class for SeamlessM4T """ from ...processing_utils import ProcessorMixin class SeamlessM4TProcessor(ProcessorMixin): r""" Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a single processor. [`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for more information. Args: feature_extractor ([`SeamlessM4TFeatureExtractor`]): The audio processor is a required input. tokenizer ([`SeamlessM4TTokenizerFast`]): The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = ("SeamlessM4TTokenizer", "SeamlessM4TTokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs): """ Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text` and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels, and T the sample length of the audio. src_lang (`str`, *optional*): The language code of the input texts/audios. If not specified, the last `src_lang` specified will be used. tgt_lang (`str`, *optional*): The code of the target language. If not specified, the last `tgt_lang` specified will be used. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). 
- **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not `None`. """ sampling_rate = kwargs.pop("sampling_rate", None) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none.") elif text is not None and audios is not None: raise ValueError( "Text and audios are mutually exclusive when passed to `SeamlessM4T`. Specify one or the other." ) elif text is not None: if tgt_lang is not None: self.tokenizer.tgt_lang = tgt_lang if src_lang is not None: self.tokenizer.src_lang = src_lang encoding = self.tokenizer(text, **kwargs) return encoding else: encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs) return encoding def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py/0
{ "file_path": "transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py", "repo_id": "transformers", "token_count": 2298 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SegGPT checkpoints from the original repository. URL: https://github.com/baaivision/Painter/tree/main/SegGPT """ import argparse import requests import torch from PIL import Image from transformers import SegGptConfig, SegGptForImageSegmentation, SegGptImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # rename embedding and its parameters rename_keys.append(("patch_embed.proj.weight", "model.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "model.embeddings.patch_embeddings.projection.bias")) rename_keys.append(("mask_token", "model.embeddings.mask_token")) rename_keys.append(("segment_token_x", "model.embeddings.segment_token_input")) rename_keys.append(("segment_token_y", "model.embeddings.segment_token_prompt")) rename_keys.append(("type_token_cls", "model.embeddings.type_token_semantic")) rename_keys.append(("type_token_ins", "model.embeddings.type_token_instance")) rename_keys.append(("pos_embed", "model.embeddings.position_embeddings")) # rename decoder and other rename_keys.append(("norm.weight", "model.encoder.layernorm.weight")) rename_keys.append(("norm.bias", "model.encoder.layernorm.bias")) rename_keys.append(("decoder_embed.weight", "decoder.decoder_embed.weight")) rename_keys.append(("decoder_embed.bias", "decoder.decoder_embed.bias")) rename_keys.append(("decoder_pred.0.weight", "decoder.decoder_pred.conv.weight")) rename_keys.append(("decoder_pred.0.bias", "decoder.decoder_pred.conv.bias")) rename_keys.append(("decoder_pred.1.weight", "decoder.decoder_pred.layernorm.weight")) rename_keys.append(("decoder_pred.1.bias", "decoder.decoder_pred.layernorm.bias")) rename_keys.append(("decoder_pred.3.weight", "decoder.decoder_pred.head.weight")) rename_keys.append(("decoder_pred.3.bias", "decoder.decoder_pred.head.bias")) # rename blocks for i in range(config.num_hidden_layers): rename_keys.append((f"blocks.{i}.attn.qkv.weight", f"model.encoder.layers.{i}.attention.qkv.weight")) rename_keys.append((f"blocks.{i}.attn.qkv.bias", f"model.encoder.layers.{i}.attention.qkv.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"model.encoder.layers.{i}.attention.proj.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"model.encoder.layers.{i}.attention.proj.bias")) rename_keys.append((f"blocks.{i}.attn.rel_pos_h", f"model.encoder.layers.{i}.attention.rel_pos_h")) rename_keys.append((f"blocks.{i}.attn.rel_pos_w", f"model.encoder.layers.{i}.attention.rel_pos_w")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"model.encoder.layers.{i}.mlp.lin1.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"model.encoder.layers.{i}.mlp.lin1.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", 
f"model.encoder.layers.{i}.mlp.lin2.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"model.encoder.layers.{i}.mlp.lin2.bias")) rename_keys.append((f"blocks.{i}.norm1.weight", f"model.encoder.layers.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"model.encoder.layers.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"model.encoder.layers.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"model.encoder.layers.{i}.layernorm_after.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on spongebob images def prepare_input(): image_input_url = ( "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" ) image_prompt_url = ( "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" ) mask_prompt_url = ( "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" ) image_input = Image.open(requests.get(image_input_url, stream=True).raw) image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw) return image_input, image_prompt, mask_prompt @torch.no_grad() def convert_seggpt_checkpoint(args): model_name = args.model_name pytorch_dump_folder_path = args.pytorch_dump_folder_path verify_logits = args.verify_logits push_to_hub = args.push_to_hub # Define default GroundingDINO configuation config = SegGptConfig() # Load original checkpoint checkpoint_url = "https://huggingface.co/BAAI/SegGpt/blob/main/seggpt_vit_large.pth" original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] # # Rename keys new_state_dict = original_state_dict.copy() rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(new_state_dict, src, dest) # Load HF model model = SegGptForImageSegmentation(config) model.eval() missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) input_img, prompt_img, prompt_mask = prepare_input() image_processor = SegGptImageProcessor() inputs = image_processor(images=input_img, prompt_images=prompt_img, prompt_masks=prompt_mask, return_tensors="pt") expected_prompt_pixel_values = torch.tensor( [ [[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]], [[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]], [[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]], ] ) expected_pixel_values = torch.tensor( [ [[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]], [[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]], [[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]], ] ) expected_prompt_masks = torch.tensor( [ [[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]], [[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]], [[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]], ] ) assert torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4) assert torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4) assert 
torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4) torch.manual_seed(2) outputs = model(**inputs) print(outputs) if verify_logits: expected_output = torch.tensor( [ [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]], [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], ] ) assert torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_output, atol=1e-4) print("Looks good!") else: print("Converted without verifying logits") if pytorch_dump_folder_path is not None: print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"EduardoPacheco/{model_name}") image_processor.push_to_hub(f"EduardoPacheco/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="seggpt-vit-large", type=str, choices=["seggpt-vit-large"], help="Name of the SegGpt model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--verify_logits", action="store_false", help="Whether or not to verify the logits against the original implementation.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_seggpt_checkpoint(args)
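# --- Editor's note: the block below is an illustrative sketch added alongside this conversion script,
# not part of the original file. It assumes the checkpoint has already been converted and saved locally
# via --pytorch_dump_folder_path to a hypothetical "./seggpt-vit-large" directory, and shows how the
# converted model and processor could then be loaded and run on the same example images the script
# verifies against.
def _example_inference_with_converted_checkpoint(dump_folder="./seggpt-vit-large"):
    """Hypothetical helper demonstrating inference with a converted SegGPT checkpoint."""
    image_input, image_prompt, mask_prompt = prepare_input()

    # Both classes are imported at the top of this script.
    image_processor = SegGptImageProcessor.from_pretrained(dump_folder)
    model = SegGptForImageSegmentation.from_pretrained(dump_folder)

    inputs = image_processor(
        images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model(**inputs)

    # `pred_masks` is the same tensor the logit check above indexes into (batch, channels, height, width).
    return outputs.pred_masks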
transformers/src/transformers/models/seggpt/convert_seggpt_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/seggpt/convert_seggpt_to_hf.py", "repo_id": "transformers", "token_count": 4276 }
360
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for SigLIP.
"""

from typing import List, Optional, Union

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class SiglipProcessor(ProcessorMixin):
    r"""
    Constructs a Siglip processor which wraps a Siglip image processor and a Siglip tokenizer into a single
    processor.

    [`SiglipProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`SiglipTokenizer`]. See the
    [`~SiglipProcessor.__call__`] and [`~SiglipProcessor.decode`] for more information.

    Args:
        image_processor ([`SiglipImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`SiglipTokenizer`]):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "SiglipImageProcessor"
    tokenizer_class = "SiglipTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        images: ImageInput = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: int = None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to SiglipTokenizer's [`~SiglipTokenizer.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` argument to SiglipImageProcessor's
        [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two
        methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must
                set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is
                the number of channels, H and W are image height and width.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*): Activates truncation to cut input sequences longer than `max_length` to `max_length`. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: encoding = self.tokenizer( text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length ) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors) if text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchFeature(data=dict(**image_features), tensor_type=return_tensors) def decode(self, *args, **kwargs): """ This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) @property # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Siglip, T5->Siglip def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
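# --- Editor's note: the example below is an illustrative sketch added for this edit, not part of the
# original module. It assumes the publicly released "google/siglip-base-patch16-224" checkpoint and a COCO
# image URL; it shows the paired text/image usage described in the `__call__` docstring above. SigLIP
# checkpoints are typically used with `padding="max_length"`.
def _example_siglip_processor_usage():
    """Hypothetical helper demonstrating how SiglipProcessor prepares inputs for a SigLIP model."""
    import requests
    from PIL import Image

    processor = SiglipProcessor.from_pretrained("google/siglip-base-patch16-224")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    inputs = processor(
        text=["a photo of 2 cats", "a photo of a dog"],
        images=image,
        padding="max_length",
        return_tensors="pt",
    )
    # `inputs` is a BatchFeature containing "input_ids" and "pixel_values", ready to be passed to SiglipModel.
    return inputs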
transformers/src/transformers/models/siglip/processing_siglip.py/0
{ "file_path": "transformers/src/transformers/models/siglip/processing_siglip.py", "repo_id": "transformers", "token_count": 2823 }
361
# coding=utf-8 # Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Splinter model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_splinter import SplinterConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "tau/splinter-base" _CONFIG_FOR_DOC = "SplinterConfig" SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "tau/splinter-base", "tau/splinter-base-qass", "tau/splinter-large", "tau/splinter-large-qass", # See all Splinter models at https://huggingface.co/models?filter=splinter ] class SplinterEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: Optional[int] = 0, ) -> Tuple: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if 
self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter class SplinterSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SplinterModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter class SplinterSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter class SplinterAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = SplinterSelfAttention(config, position_embedding_type=position_embedding_type) self.output = SplinterSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter class SplinterIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return 
hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter class SplinterOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter class SplinterLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SplinterAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = SplinterAttention(config, position_embedding_type="absolute") self.intermediate = SplinterIntermediate(config) self.output = SplinterOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if 
decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter class SplinterEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class SplinterPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SplinterConfig base_model_prefix = "splinter" supports_gradient_checkpointing = True # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SPLINTER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SplinterConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SPLINTER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `{0}`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.", SPLINTER_START_DOCSTRING, ) class SplinterModel(SplinterPreTrainedModel): """ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class 
SplinterFullyConnectedLayer(nn.Module): def __init__(self, input_dim, output_dim, hidden_act="gelu"): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.dense = nn.Linear(self.input_dim, self.output_dim) self.act_fn = ACT2FN[hidden_act] self.LayerNorm = nn.LayerNorm(self.output_dim) def forward(self, inputs: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(inputs) hidden_states = self.act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class QuestionAwareSpanSelectionHead(nn.Module): """ Implementation of Question-Aware Span Selection (QASS) head, described in Splinter's paper: """ def __init__(self, config): super().__init__() self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) def forward(self, inputs, positions): _, _, dim = inputs.size() index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim] gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim] query_start_reps = self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim] query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim] start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim] end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim] hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim] start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length] start_logits = torch.matmul(hidden_states, start_reps) hidden_states = self.end_classifier(query_end_reps) end_reps = end_reps.permute(0, 2, 1) end_logits = torch.matmul(hidden_states, end_reps) return start_logits, end_logits @add_start_docstrings( """ Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", SPLINTER_START_DOCSTRING, ) class SplinterForQuestionAnswering(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict question_positions_were_none = False if question_positions is None: if input_ids is not None: question_position_for_each_example = torch.argmax( (torch.eq(input_ids, self.question_token_id)).int(), dim=-1 ) else: question_position_for_each_example = torch.zeros( inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device ) question_positions = question_position_for_each_example.unsqueeze(-1) question_positions_were_none = True outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) if question_positions_were_none: start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1) if attention_mask is not None: start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @dataclass class SplinterForPreTrainingOutput(ModelOutput): """ Class for outputs of Splinter as a span selection model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( """ Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans instead. """, SPLINTER_START_DOCSTRING, ) class SplinterForPreTraining(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length") ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, SplinterForPreTrainingOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if question_positions is None and start_positions is not None and end_positions is not None: raise TypeError("question_positions must be specified in order to calculate the loss") elif question_positions is None and input_ids is None: raise TypeError("question_positions must be specified when input_embeds is used") elif question_positions is None: question_positions = self._prepare_question_positions(input_ids) outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] batch_size, sequence_length, dim = sequence_output.size() # [batch_size, num_questions, sequence_length] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) num_questions = question_positions.size(1) if attention_mask is not None: attention_mask_for_each_question = attention_mask.unsqueeze(1).expand( batch_size, num_questions, sequence_length ) start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min total_loss = None # [batch_size, num_questions, sequence_length] if start_positions is not None and end_positions is not None: # sometimes the start/end positions are outside our model inputs, we ignore these terms start_positions.clamp_(0, max(0, sequence_length - 1)) end_positions.clamp_(0, max(0, sequence_length - 1)) # Ignore zero positions in the loss. Splinter never predicts zero # during pretraining and zero is used for padding question # tokens as well as for start and end positions of padded # question tokens. loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id) start_loss = loss_fct( start_logits.view(batch_size * num_questions, sequence_length), start_positions.view(batch_size * num_questions), ) end_loss = loss_fct( end_logits.view(batch_size * num_questions, sequence_length), end_positions.view(batch_size * num_questions), ) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return SplinterForPreTrainingOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor: rows, flat_positions = torch.where(input_ids == self.config.question_token_id) num_questions = torch.bincount(rows) positions = torch.full( (input_ids.size(0), num_questions.max()), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) cols = torch.cat([torch.arange(n) for n in num_questions]) positions[rows, cols] = flat_positions return positions
transformers/src/transformers/models/splinter/modeling_splinter.py/0
{ "file_path": "transformers/src/transformers/models/splinter/modeling_splinter.py", "repo_id": "transformers", "token_count": 22234 }
362
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import requests import torch from PIL import Image from transformers import SuperPointConfig, SuperPointForKeypointDetection, SuperPointImageProcessor def get_superpoint_config(): config = SuperPointConfig( encoder_hidden_sizes=[64, 64, 128, 128], decoder_hidden_size=256, keypoint_decoder_dim=65, descriptor_decoder_dim=256, keypoint_threshold=0.005, max_keypoints=-1, nms_radius=4, border_removal_distance=4, initializer_range=0.02, ) return config def create_rename_keys(config, state_dict): rename_keys = [] # Encoder weights rename_keys.append(("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight")) rename_keys.append(("conv1b.weight", "encoder.conv_blocks.0.conv_b.weight")) rename_keys.append(("conv2a.weight", "encoder.conv_blocks.1.conv_a.weight")) rename_keys.append(("conv2b.weight", "encoder.conv_blocks.1.conv_b.weight")) rename_keys.append(("conv3a.weight", "encoder.conv_blocks.2.conv_a.weight")) rename_keys.append(("conv3b.weight", "encoder.conv_blocks.2.conv_b.weight")) rename_keys.append(("conv4a.weight", "encoder.conv_blocks.3.conv_a.weight")) rename_keys.append(("conv4b.weight", "encoder.conv_blocks.3.conv_b.weight")) rename_keys.append(("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias")) rename_keys.append(("conv1b.bias", "encoder.conv_blocks.0.conv_b.bias")) rename_keys.append(("conv2a.bias", "encoder.conv_blocks.1.conv_a.bias")) rename_keys.append(("conv2b.bias", "encoder.conv_blocks.1.conv_b.bias")) rename_keys.append(("conv3a.bias", "encoder.conv_blocks.2.conv_a.bias")) rename_keys.append(("conv3b.bias", "encoder.conv_blocks.2.conv_b.bias")) rename_keys.append(("conv4a.bias", "encoder.conv_blocks.3.conv_a.bias")) rename_keys.append(("conv4b.bias", "encoder.conv_blocks.3.conv_b.bias")) # Keypoint Decoder weights rename_keys.append(("convPa.weight", "keypoint_decoder.conv_score_a.weight")) rename_keys.append(("convPb.weight", "keypoint_decoder.conv_score_b.weight")) rename_keys.append(("convPa.bias", "keypoint_decoder.conv_score_a.bias")) rename_keys.append(("convPb.bias", "keypoint_decoder.conv_score_b.bias")) # Descriptor Decoder weights rename_keys.append(("convDa.weight", "descriptor_decoder.conv_descriptor_a.weight")) rename_keys.append(("convDb.weight", "descriptor_decoder.conv_descriptor_b.weight")) rename_keys.append(("convDa.bias", "descriptor_decoder.conv_descriptor_a.bias")) rename_keys.append(("convDb.bias", "descriptor_decoder.conv_descriptor_b.bias")) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_imgs(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im1 = Image.open(requests.get(url, stream=True).raw) url = "http://images.cocodataset.org/test-stuff2017/000000004016.jpg" im2 = Image.open(requests.get(url, stream=True).raw) return [im1, im2] @torch.no_grad() def convert_superpoint_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub, test_mode=False): """ 
Copy/paste/tweak model's weights to our SuperPoint structure. """ print("Downloading original model from checkpoint...") config = get_superpoint_config() # load original state_dict from URL original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url) print("Converting model parameters...") # rename keys rename_keys = create_rename_keys(config, original_state_dict) new_state_dict = original_state_dict.copy() for src, dest in rename_keys: rename_key(new_state_dict, src, dest) # Load HuggingFace model model = SuperPointForKeypointDetection(config) model.load_state_dict(new_state_dict) model.eval() print("Successfully loaded weights in the model") # Check model outputs preprocessor = SuperPointImageProcessor() inputs = preprocessor(images=prepare_imgs(), return_tensors="pt") outputs = model(**inputs) # If test_mode is True, we check that the model outputs match the original results if test_mode: torch.count_nonzero(outputs.mask[0]) expected_keypoints_shape = (2, 830, 2) expected_scores_shape = (2, 830) expected_descriptors_shape = (2, 830, 256) expected_keypoints_values = torch.tensor([[480.0, 9.0], [494.0, 9.0], [489.0, 16.0]]) expected_scores_values = torch.tensor([0.0064, 0.0140, 0.0595, 0.0728, 0.5170, 0.0175, 0.1523, 0.2055, 0.0336]) expected_descriptors_value = torch.tensor(-0.1096) assert outputs.keypoints.shape == expected_keypoints_shape assert outputs.scores.shape == expected_scores_shape assert outputs.descriptors.shape == expected_descriptors_shape assert torch.allclose(outputs.keypoints[0, :3], expected_keypoints_values, atol=1e-3) assert torch.allclose(outputs.scores[0, :9], expected_scores_values, atol=1e-3) assert torch.allclose(outputs.descriptors[0, 0, 0], expected_descriptors_value, atol=1e-3) print("Model outputs match the original results!") if save_model: print("Saving model to local...") # Create folder to save model if not os.path.isdir(pytorch_dump_folder_path): os.mkdir(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) preprocessor.save_pretrained(pytorch_dump_folder_path) model_name = "superpoint" if push_to_hub: print(f"Pushing {model_name} to the hub...") model.push_to_hub(model_name) preprocessor.push_to_hub(model_name) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth", type=str, help="URL of the original SuperPoint checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub") args = parser.parse_args() convert_superpoint_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub )
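For context, a short sketch of how the conversion above might be driven from Python instead of the CLI block; it is not part of the original script. The output folder name "superpoint_converted" is an arbitrary placeholder, and the checkpoint URL is simply the script's own default.

from transformers import SuperPointForKeypointDetection, SuperPointImageProcessor

# Convert and save locally (placeholder folder name), without pushing to the hub.
convert_superpoint_checkpoint(
    checkpoint_url="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth",
    pytorch_dump_folder_path="superpoint_converted",
    save_model=True,
    push_to_hub=False,
)

# Reload the converted weights and run them on the same sample images used above.
model = SuperPointForKeypointDetection.from_pretrained("superpoint_converted")
processor = SuperPointImageProcessor.from_pretrained("superpoint_converted")
outputs = model(**processor(images=prepare_imgs(), return_tensors="pt"))
print(outputs.keypoints.shape)  # (batch_size, max_keypoints_per_image, 2)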
transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2857 }
363
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Swin2SR.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class Swin2SRImageProcessor(BaseImageProcessor): r""" Constructs a Swin2SR image processor. Args: do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self._valid_processor_keys = [ "images", "do_rescale", "rescale_factor", "do_pad", "pad_size", "return_tensors", "data_format", "input_data_format", ] def pad( self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image to make the height and width divisible by `size`. Args: image (`np.ndarray`): Image to pad. size (`int`): The size to make the height and width divisible by. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The padded image. 
""" old_height, old_width = get_image_size(image, input_data_format) pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return pad( image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format, input_data_format=input_data_format, ) def preprocess( self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to make the height and width divisible by `window_size`. pad_size (`int`, *optional*, defaults to 32): The size of the sliding window for the local attention. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of typ, input_data_format=input_data_formate `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_pad=do_pad, size_divisibility=pad_size, # Here the pad function simply requires pad_size. 
) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad(image, size=pad_size, input_data_format=input_data_format) for image in images] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
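A short sketch of the processor's effect, not part of the original module; it assumes only that NumPy and PyTorch are installed, and the input dimensions are arbitrary, chosen so the padding up to a multiple of `pad_size` (8 by default) is visible.

import numpy as np

from transformers import Swin2SRImageProcessor

processor = Swin2SRImageProcessor()

# Arbitrary 419x451 RGB image with values in [0, 255], so do_rescale applies.
image = np.random.randint(0, 256, size=(419, 451, 3), dtype=np.uint8)

inputs = processor(images=image, return_tensors="pt")

# Height and width are padded up to the next multiple of 8: torch.Size([1, 3, 424, 456]).
print(inputs["pixel_values"].shape)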
transformers/src/transformers/models/swin2sr/image_processing_swin2sr.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/image_processing_swin2sr.py", "repo_id": "transformers", "token_count": 4087 }
364
#!/usr/bin/env bash
# Use this script as follows: ./download_from_gcp.sh /path/to/folder/to/store/downloads

folder_to_store_downloads=${1}

# Replace by gcp_path to T5 cloud bucket folder here
# To download the official `t5-small` model of https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints:
gcp_path="gs://t5-data/pretrained_models/small"

# Number of files the checkpoint is split into
num_of_checks=16

# Create dir if not exist
mkdir -p ${folder_to_store_downloads}

# Copy all meta information files
gsutil cp "${gcp_path}/operative_config.gin" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/checkpoint" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.index" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.meta" ${folder_to_store_downloads}

# Copy all model weights
# single digit num checkpoints
for ((i = 0 ; i < ${num_of_checks} && i < 10 ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-0000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# double digit num checkpoints
for ((i = 10 ; i < ${num_of_checks} ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# Having run this script, you should create a suitable config.json, *e.g.* by
# looking at `https://huggingface.co/t5-small`.
# Then you can run `python convert_t5_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path "${folder_to_store_downloads}" --config_file "config.json" --pytorch_dump_path "/path/to/store/pytorch/weights"`
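Once the TF checkpoint has been converted as described in the closing comment, the dump can be loaded with the regular PyTorch API. A minimal sketch, not part of the original script, assuming the placeholder dump path from the comment above and the public `t5-small` tokenizer (the SentencePiece model is not part of the downloaded TF checkpoint):

from transformers import T5ForConditionalGeneration, T5Tokenizer

# Placeholder path, matching the --pytorch_dump_path used in the conversion command above.
model = T5ForConditionalGeneration.from_pretrained("/path/to/store/pytorch/weights")
tokenizer = T5Tokenizer.from_pretrained("t5-small")

input_ids = tokenizer("translate English to German: Hello, my dog is cute.", return_tensors="pt").input_ids
print(tokenizer.decode(model.generate(input_ids)[0], skip_special_tokens=True))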
transformers/src/transformers/models/t5/download_from_gcp.sh/0
{ "file_path": "transformers/src/transformers/models/t5/download_from_gcp.sh", "repo_id": "transformers", "token_count": 579 }
365
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for TAPAS model.""" import collections import datetime import enum import itertools import math import os import re import unicodedata from dataclasses import dataclass from typing import Callable, Dict, Generator, List, Optional, Text, Tuple, Union import numpy as np from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, VERY_LARGE_INTEGER, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, ) from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging if is_pandas_available(): import pandas as pd logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { # large models "google/tapas-large-finetuned-sqa": ( "https://huggingface.co/google/tapas-large-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-wtq": ( "https://huggingface.co/google/tapas-large-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-large-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-tabfact": ( "https://huggingface.co/google/tapas-large-finetuned-tabfact/resolve/main/vocab.txt" ), # base models "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/vocab.txt" ), # medium models "google/tapas-medium-finetuned-sqa": ( "https://huggingface.co/google/tapas-medium-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-wtq": ( "https://huggingface.co/google/tapas-medium-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-medium-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-tabfact": ( "https://huggingface.co/google/tapas-medium-finetuned-tabfact/resolve/main/vocab.txt" ), # small models "google/tapas-small-finetuned-sqa": ( "https://huggingface.co/google/tapas-small-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-wtq": ( "https://huggingface.co/google/tapas-small-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-small-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-tabfact": ( 
"https://huggingface.co/google/tapas-small-finetuned-tabfact/resolve/main/vocab.txt" ), # tiny models "google/tapas-tiny-finetuned-sqa": ( "https://huggingface.co/google/tapas-tiny-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-wtq": ( "https://huggingface.co/google/tapas-tiny-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-tiny-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-tabfact": ( "https://huggingface.co/google/tapas-tiny-finetuned-tabfact/resolve/main/vocab.txt" ), # mini models "google/tapas-mini-finetuned-sqa": ( "https://huggingface.co/google/tapas-mini-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-wtq": ( "https://huggingface.co/google/tapas-mini-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-mini-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-tabfact": ( "https://huggingface.co/google/tapas-mini-finetuned-tabfact/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {name: 512 for name in PRETRAINED_VOCAB_FILES_MAP.keys()} PRETRAINED_INIT_CONFIGURATION = {name: {"do_lower_case": True} for name in PRETRAINED_VOCAB_FILES_MAP.keys()} class TapasTruncationStrategy(ExplicitEnum): """ Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE. """ DROP_ROWS_TO_FIT = "drop_rows_to_fit" DO_NOT_TRUNCATE = "do_not_truncate" TableValue = collections.namedtuple("TokenValue", ["token", "column_id", "row_id"]) @dataclass(frozen=True) class TokenCoordinates: column_index: int row_index: int token_index: int @dataclass class TokenizedTable: rows: List[List[List[Text]]] selected_tokens: List[TokenCoordinates] @dataclass(frozen=True) class SerializedExample: tokens: List[Text] column_ids: List[int] row_ids: List[int] segment_ids: List[int] def _is_inner_wordpiece(token: Text): return token.startswith("##") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. 
Accepts the following values: - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ class TapasTokenizer(PreTrainedTokenizer): r""" Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by TAPAS models. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`, `column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`: - segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and padding. - column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. - row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. Tokens of column headers are also 0. - prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in a conversational setup (such as SQA). - column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2 respectively. 0 for all question tokens, special tokens and padding. - inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding. 
- numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all question tokens, special tokens and padding. [`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and wordpiece. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. empty_token (`str`, *optional*, defaults to `"[EMPTY]"`): The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?". tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). cell_trim_length (`int`, *optional*, defaults to -1): If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be used with `truncation` set to `True`. max_column_id (`int`, *optional*): Max column id to extract. max_row_id (`int`, *optional*): Max row id to extract. strip_column_names (`bool`, *optional*, defaults to `False`): Whether to add empty strings instead of column names. update_answer_coordinates (`bool`, *optional*, defaults to `False`): Whether to recompute the answer coordinates from the answer text. min_question_length (`int`, *optional*): Minimum length of each question in terms of tokens (will be skipped otherwise). max_question_length (`int`, *optional*): Maximum length of each question in terms of tokens (will be skipped otherwise). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", empty_token="[EMPTY]", tokenize_chinese_chars=True, strip_accents=None, cell_trim_length: int = -1, max_column_id: int = None, max_row_id: int = None, strip_column_names: bool = False, update_answer_coordinates: bool = False, min_question_length=None, max_question_length=None, model_max_length: int = 512, additional_special_tokens: Optional[List[str]] = None, **kwargs, ): if not is_pandas_available(): raise ImportError("Pandas is required for the TAPAS tokenizer.") if additional_special_tokens is not None: if empty_token not in additional_special_tokens: additional_special_tokens.append(empty_token) else: additional_special_tokens = [empty_token] if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) # Additional properties self.cell_trim_length = cell_trim_length self.max_column_id = ( max_column_id if max_column_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER ) self.max_row_id = ( max_row_id if max_row_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER ) self.strip_column_names = strip_column_names self.update_answer_coordinates = update_answer_coordinates self.min_question_length = min_question_length self.max_question_length = max_question_length super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, empty_token=empty_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, cell_trim_length=cell_trim_length, max_column_id=max_column_id, max_row_id=max_row_id, strip_column_names=strip_column_names, update_answer_coordinates=update_answer_coordinates, min_question_length=min_question_length, max_question_length=max_question_length, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): if format_text(text) == EMPTY_TEXT: return [self.additional_special_tokens[0]] split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) 
else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) def create_attention_mask_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]: """ Creates the attention mask according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the ID. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the attention mask values. """ return [1] * (1 + len(query_ids) + 1 + len(table_values)) def create_segment_token_type_ids_from_sequences( self, query_ids: List[int], table_values: List[TableValue] ) -> List[int]: """ Creates the segment token type IDs according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the ID. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the segment token type IDs values. """ table_ids = list(zip(*table_values))[0] if table_values else [] return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids) def create_column_token_type_ids_from_sequences( self, query_ids: List[int], table_values: List[TableValue] ) -> List[int]: """ Creates the column token type IDs according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the ID. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the column token type IDs values. """ table_column_ids = list(zip(*table_values))[1] if table_values else [] return [0] * (1 + len(query_ids) + 1) + list(table_column_ids) def create_row_token_type_ids_from_sequences( self, query_ids: List[int], table_values: List[TableValue] ) -> List[int]: """ Creates the row token type IDs according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the ID. 
table_values (`List[TableValue]`): lift of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the row token type IDs values. """ table_row_ids = list(zip(*table_values))[2] if table_values else [] return [0] * (1 + len(query_ids) + 1) + list(table_row_ids) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a question and flattened table for question answering or sequence classification tasks by concatenating and adding special tokens. Args: token_ids_0 (`List[int]`): The ids of the question. token_ids_1 (`List[int]`, *optional*): The ids of the flattened table. Returns: `List[int]`: The model input with special tokens. """ if token_ids_1 is None: raise ValueError("With TAPAS, you must provide both question IDs and table IDs.") return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of question IDs. token_ids_1 (`List[int]`, *optional*): List of flattened table IDs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) return [1] + ([0] * len(token_ids_0)) + [1] @add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, table: "pd.DataFrame", queries: Optional[ Union[ TextInput, PreTokenizedInput, EncodedInput, List[TextInput], List[PreTokenizedInput], List[EncodedInput], ] ] = None, answer_coordinates: Optional[Union[List[Tuple], List[List[Tuple]]]] = None, answer_text: Optional[Union[List[TextInput], List[List[TextInput]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) related to a table. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. queries (`str` or `List[str]`): Question or batch of questions related to a table to be encoded. Note that in case of a batch, all questions must refer to the **same** table. 
answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. In case only a single table-question pair is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. In case a batch of table-question pairs is provided, then the answer_coordinates must be a list of lists of tuples (each list corresponding to a single table-question pair). answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. In case only a single table-question pair is provided, then the answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a single table-question pair). """ assert isinstance(table, pd.DataFrame), "Table must be of type pd.DataFrame" # Input type checking for clearer error valid_query = False # Check that query has a valid type if queries is None or isinstance(queries, str): valid_query = True elif isinstance(queries, (list, tuple)): if len(queries) == 0 or isinstance(queries[0], str): valid_query = True if not valid_query: raise ValueError( "queries input must of type `str` (single example), `List[str]` (batch or single pretokenized" " example). " ) is_batched = isinstance(queries, (list, tuple)) if is_batched: return self.batch_encode_plus( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( table=table, query=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, table: "pd.DataFrame", queries: Optional[ Union[ List[TextInput], List[PreTokenizedInput], List[EncodedInput], ] ] = None, answer_coordinates: Optional[List[List[Tuple]]] = None, answer_text: Optional[List[List[TextInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, 
return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Prepare a table and a list of strings for the model. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. queries (`List[str]`): Batch of questions related to a table to be encoded. Note that all questions must refer to the **same** table. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a single table-question pair). answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. In case a batch of table-question pairs is provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a single table-question pair). Each string must be the answer text of a corresponding answer coordinate. """ if return_token_type_ids is not None and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text): raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided") elif answer_coordinates is None and answer_text is None: answer_coordinates = answer_text = [None] * len(queries) if "is_split_into_words" in kwargs: raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.") if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) return self._batch_encode_plus( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _get_question_tokens(self, query): """Tokenizes the query, taking into account the max and min question length.""" query_tokens = self.tokenize(query) if self.max_question_length is not None and len(query_tokens) > self.max_question_length: logger.warning("Skipping query as its tokens are longer than the max question length") return "", [] if self.min_question_length is not None and len(query_tokens) < self.min_question_length: logger.warning("Skipping query as its tokens are shorter than the min question length") return "", [] return query, query_tokens def _batch_encode_plus( self, table, queries: Union[ List[TextInput], List[PreTokenizedInput], List[EncodedInput], ], answer_coordinates: Optional[List[List[Tuple]]] = None, answer_text: Optional[List[List[TextInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: table_tokens = self._tokenize_table(table) queries_tokens = [] for idx, query in enumerate(queries): query, query_tokens = self._get_question_tokens(query) queries[idx] = query queries_tokens.append(query_tokens) batch_outputs = self._batch_prepare_for_model( table, queries, tokenized_table=table_tokens, queries_tokens=queries_tokens, answer_coordinates=answer_coordinates, padding=padding, truncation=truncation, answer_text=answer_text, add_special_tokens=add_special_tokens, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) return BatchEncoding(batch_outputs) def _batch_prepare_for_model( self, raw_table: "pd.DataFrame", raw_queries: Union[ List[TextInput], List[PreTokenizedInput], List[EncodedInput], ], tokenized_table: Optional[TokenizedTable] = None, queries_tokens: Optional[List[List[str]]] = None, answer_coordinates: Optional[List[List[Tuple]]] = None, answer_text: Optional[List[List[TextInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, 
return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: batch_outputs = {} for index, example in enumerate(zip(raw_queries, queries_tokens, answer_coordinates, answer_text)): raw_query, query_tokens, answer_coords, answer_txt = example outputs = self.prepare_for_model( raw_table, raw_query, tokenized_table=tokenized_table, query_tokens=query_tokens, answer_coordinates=answer_coords, answer_text=answer_txt, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards truncation=truncation, max_length=max_length, pad_to_multiple_of=None, # we pad in batch afterwards return_attention_mask=False, # we pad in batch afterwards return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, prev_answer_coordinates=answer_coordinates[index - 1] if index != 0 else None, prev_answer_text=answer_text[index - 1] if index != 0 else None, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode( self, table: "pd.DataFrame", query: Optional[ Union[ TextInput, PreTokenizedInput, EncodedInput, ] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> List[int]: """ Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc. which are necessary for the model to work correctly. Use that method if you want to build your processing on your own, otherwise refer to `__call__`. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. query (`str` or `List[str]`): Question related to a table to be encoded. 
""" encoded_inputs = self.encode_plus( table, query=query, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, table: "pd.DataFrame", query: Optional[ Union[ TextInput, PreTokenizedInput, EncodedInput, ] ] = None, answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Prepare a table and a string for the model. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. query (`str` or `List[str]`): Question related to a table to be encoded. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. The answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. """ if return_token_type_ids is not None and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text): raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided") if "is_split_into_words" in kwargs: raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.") if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) return self._encode_plus( table=table, query=query, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, table: "pd.DataFrame", query: Union[ TextInput, PreTokenizedInput, EncodedInput, ], answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ): if query is None: query = "" logger.warning( "TAPAS is a question answering model but you have not passed a query. Please be aware that the " "model will probably not behave correctly." ) table_tokens = self._tokenize_table(table) query, query_tokens = self._get_question_tokens(query) return self.prepare_for_model( table, query, tokenized_table=table_tokens, query_tokens=query_tokens, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, raw_table: "pd.DataFrame", raw_query: Union[ TextInput, PreTokenizedInput, EncodedInput, ], tokenized_table: Optional[TokenizedTable] = None, query_tokens: Optional[TokenizedTable] = None, answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens. Args: raw_table (`pd.DataFrame`): The original table before any transformation (like tokenization) was applied to it. raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`): The original query before any transformation (like tokenization) was applied to it. 
tokenized_table (`TokenizedTable`): The table after tokenization. query_tokens (`List[str]`): The query after tokenization. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. The answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. """ if isinstance(padding, bool): if padding and (max_length is not None or pad_to_multiple_of is not None): padding = PaddingStrategy.MAX_LENGTH else: padding = PaddingStrategy.DO_NOT_PAD elif not isinstance(padding, PaddingStrategy): padding = PaddingStrategy(padding) if isinstance(truncation, bool): if truncation: truncation = TapasTruncationStrategy.DROP_ROWS_TO_FIT else: truncation = TapasTruncationStrategy.DO_NOT_TRUNCATE elif not isinstance(truncation, TapasTruncationStrategy): truncation = TapasTruncationStrategy(truncation) encoded_inputs = {} is_part_of_batch = False prev_answer_coordinates, prev_answer_text = None, None if "prev_answer_coordinates" in kwargs and "prev_answer_text" in kwargs: is_part_of_batch = True prev_answer_coordinates = kwargs["prev_answer_coordinates"] prev_answer_text = kwargs["prev_answer_text"] num_rows = self._get_num_rows(raw_table, truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE) num_columns = self._get_num_columns(raw_table) _, _, num_tokens = self._get_table_boundaries(tokenized_table) if truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE: num_rows, num_tokens = self._get_truncated_table_rows( query_tokens, tokenized_table, num_rows, num_columns, max_length, truncation_strategy=truncation ) table_data = list(self._get_table_values(tokenized_table, num_columns, num_rows, num_tokens)) query_ids = self.convert_tokens_to_ids(query_tokens) table_ids = list(zip(*table_data))[0] if len(table_data) > 0 else list(zip(*table_data)) table_ids = self.convert_tokens_to_ids(list(table_ids)) if "return_overflowing_tokens" in kwargs and kwargs["return_overflowing_tokens"]: raise ValueError("TAPAS does not return overflowing tokens as it works on tables.") if add_special_tokens: input_ids = self.build_inputs_with_special_tokens(query_ids, table_ids) else: input_ids = query_ids + table_ids if max_length is not None and len(input_ids) > max_length: raise ValueError( "Could not encode the query and table header given the maximum length. 
Encoding the query and table " f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}" ) encoded_inputs["input_ids"] = input_ids segment_ids = self.create_segment_token_type_ids_from_sequences(query_ids, table_data) column_ids = self.create_column_token_type_ids_from_sequences(query_ids, table_data) row_ids = self.create_row_token_type_ids_from_sequences(query_ids, table_data) if not is_part_of_batch or (prev_answer_coordinates is None and prev_answer_text is None): # simply set the prev_labels to zeros prev_labels = [0] * len(row_ids) else: prev_labels = self.get_answer_ids( column_ids, row_ids, table_data, prev_answer_text, prev_answer_coordinates ) # FIRST: parse both the table and question in terms of numeric values raw_table = add_numeric_table_values(raw_table) raw_query = add_numeric_values_to_question(raw_query) # SECOND: add numeric-related features (and not parse them in these functions): column_ranks, inv_column_ranks = self._get_numeric_column_ranks(column_ids, row_ids, raw_table) numeric_relations = self._get_numeric_relations(raw_query, column_ids, row_ids, raw_table) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_attention_mask: attention_mask = self.create_attention_mask_from_sequences(query_ids, table_data) encoded_inputs["attention_mask"] = attention_mask if answer_coordinates is not None and answer_text is not None: labels = self.get_answer_ids(column_ids, row_ids, table_data, answer_text, answer_coordinates) numeric_values = self._get_numeric_values(raw_table, column_ids, row_ids) numeric_values_scale = self._get_numeric_values_scale(raw_table, column_ids, row_ids) encoded_inputs["labels"] = labels encoded_inputs["numeric_values"] = numeric_values encoded_inputs["numeric_values_scale"] = numeric_values_scale if return_token_type_ids: token_type_ids = [ segment_ids, column_ids, row_ids, prev_labels, column_ranks, inv_column_ranks, numeric_relations, ] token_type_ids = [list(ids) for ids in list(zip(*token_type_ids))] encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(query_ids, table_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(input_ids) # Check lengths if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose: if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): logger.warning( "Token indices sequence length is longer than the specified maximum sequence length " f"for this model ({len(encoded_inputs['input_ids'])} > {self.model_max_length}). Running this " "sequence through the model will result in indexing errors." 
) self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True # Padding if padding != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def _get_truncated_table_rows( self, query_tokens: List[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy], ) -> Tuple[int, int]: """ Truncates a sequence pair in-place following the strategy. Args: query_tokens (`List[str]`): List of strings corresponding to the tokenized query. tokenized_table (`TokenizedTable`): Tokenized table num_rows (`int`): Total number of table rows num_columns (`int`): Total number of table columns max_length (`int`): Total maximum length. truncation_strategy (`str` or [`TapasTruncationStrategy`]): Truncation strategy to use. Seeing as this method should only be called when truncating, the only available strategy is the `"drop_rows_to_fit"` strategy. Returns: `Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available for each table element. """ if not isinstance(truncation_strategy, TapasTruncationStrategy): truncation_strategy = TapasTruncationStrategy(truncation_strategy) if max_length is None: max_length = self.model_max_length if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT: while True: num_tokens = self._get_max_num_tokens( query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length ) if num_tokens is not None: # We could fit the table. break # Try to drop a row to fit the table. num_rows -= 1 if num_rows < 1: break elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE: raise ValueError(f"Unknown truncation strategy {truncation_strategy}.") return num_rows, num_tokens or 1 def _tokenize_table( self, table=None, ): """ Tokenizes column headers and cell texts of a table. Args: table (`pd.Dataframe`): Table. Returns: `TokenizedTable`: TokenizedTable object. """ tokenized_rows = [] tokenized_row = [] # tokenize column headers for column in table: if self.strip_column_names: tokenized_row.append(self.tokenize("")) else: tokenized_row.append(self.tokenize(column)) tokenized_rows.append(tokenized_row) # tokenize cell values for idx, row in table.iterrows(): tokenized_row = [] for cell in row: tokenized_row.append(self.tokenize(cell)) tokenized_rows.append(tokenized_row) token_coordinates = [] for row_index, row in enumerate(tokenized_rows): for column_index, cell in enumerate(row): for token_index, _ in enumerate(cell): token_coordinates.append( TokenCoordinates( row_index=row_index, column_index=column_index, token_index=token_index, ) ) return TokenizedTable( rows=tokenized_rows, selected_tokens=token_coordinates, ) def _question_encoding_cost(self, question_tokens): # Two extra spots of SEP and CLS. return len(question_tokens) + 2 def _get_token_budget(self, question_tokens, max_length=None): """ Computes the number of tokens left for the table after tokenizing a question, taking into account the max sequence length of the model. Args: question_tokens (`List[String]`): List of question tokens. 
Returns: `int`: the number of tokens left for the table, given the model max length. """ return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost( question_tokens ) def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]: """Iterates over partial table and returns token, column and row indexes.""" for tc in table.selected_tokens: # First row is header row. if tc.row_index >= num_rows + 1: continue if tc.column_index >= num_columns: continue cell = table.rows[tc.row_index][tc.column_index] token = cell[tc.token_index] word_begin_index = tc.token_index # Don't add partial words. Find the starting word piece and check if it # fits in the token budget. while word_begin_index >= 0 and _is_inner_wordpiece(cell[word_begin_index]): word_begin_index -= 1 if word_begin_index >= num_tokens: continue yield TableValue(token, tc.column_index + 1, tc.row_index) def _get_table_boundaries(self, table): """Return maximal number of rows, columns and tokens.""" max_num_tokens = 0 max_num_columns = 0 max_num_rows = 0 for tc in table.selected_tokens: max_num_columns = max(max_num_columns, tc.column_index + 1) max_num_rows = max(max_num_rows, tc.row_index + 1) max_num_tokens = max(max_num_tokens, tc.token_index + 1) max_num_columns = min(self.max_column_id, max_num_columns) max_num_rows = min(self.max_row_id, max_num_rows) return max_num_rows, max_num_columns, max_num_tokens def _get_table_cost(self, table, num_columns, num_rows, num_tokens): return sum(1 for _ in self._get_table_values(table, num_columns, num_rows, num_tokens)) def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length): """Computes max number of tokens that can be squeezed into the budget.""" token_budget = self._get_token_budget(question_tokens, max_length) _, _, max_num_tokens = self._get_table_boundaries(tokenized_table) if self.cell_trim_length >= 0 and max_num_tokens > self.cell_trim_length: max_num_tokens = self.cell_trim_length num_tokens = 0 for num_tokens in range(max_num_tokens + 1): cost = self._get_table_cost(tokenized_table, num_columns, num_rows, num_tokens + 1) if cost > token_budget: break if num_tokens < max_num_tokens: if self.cell_trim_length >= 0: # We don't allow dynamic trimming if a cell_trim_length is set. 
return None if num_tokens == 0: return None return num_tokens def _get_num_columns(self, table): num_columns = table.shape[1] if num_columns >= self.max_column_id: raise ValueError("Too many columns") return num_columns def _get_num_rows(self, table, drop_rows_to_fit): num_rows = table.shape[0] if num_rows >= self.max_row_id: if drop_rows_to_fit: num_rows = self.max_row_id - 1 else: raise ValueError("Too many rows") return num_rows def _serialize_text(self, question_tokens): """Serializes texts in index arrays.""" tokens = [] segment_ids = [] column_ids = [] row_ids = [] # add [CLS] token at the beginning tokens.append(self.cls_token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) for token in question_tokens: tokens.append(token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) return tokens, segment_ids, column_ids, row_ids def _serialize( self, question_tokens, table, num_columns, num_rows, num_tokens, ): """Serializes table and text.""" tokens, segment_ids, column_ids, row_ids = self._serialize_text(question_tokens) # add [SEP] token between question and table tokens tokens.append(self.sep_token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) for token, column_id, row_id in self._get_table_values(table, num_columns, num_rows, num_tokens): tokens.append(token) segment_ids.append(1) column_ids.append(column_id) row_ids.append(row_id) return SerializedExample( tokens=tokens, segment_ids=segment_ids, column_ids=column_ids, row_ids=row_ids, ) def _get_column_values(self, table, col_index): table_numeric_values = {} for row_index, row in table.iterrows(): cell = row[col_index] if cell.numeric_value is not None: table_numeric_values[row_index] = cell.numeric_value return table_numeric_values def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id): for index in range(len(column_ids)): if column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id: yield index def _get_numeric_column_ranks(self, column_ids, row_ids, table): """Returns column ranks for all numeric columns.""" ranks = [0] * len(column_ids) inv_ranks = [0] * len(column_ids) # original code from tf_example_utils.py of the original implementation if table is not None: for col_index in range(len(table.columns)): table_numeric_values = self._get_column_values(table, col_index) if not table_numeric_values: continue try: key_fn = get_numeric_sort_key_fn(table_numeric_values.values()) except ValueError: continue table_numeric_values = {row_index: key_fn(value) for row_index, value in table_numeric_values.items()} table_numeric_values_inv = collections.defaultdict(list) for row_index, value in table_numeric_values.items(): table_numeric_values_inv[value].append(row_index) unique_values = sorted(table_numeric_values_inv.keys()) for rank, value in enumerate(unique_values): for row_index in table_numeric_values_inv[value]: for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index): ranks[index] = rank + 1 inv_ranks[index] = len(unique_values) - rank return ranks, inv_ranks def _get_numeric_sort_key_fn(self, table_numeric_values, value): """ Returns the sort key function for comparing value to table values. The function returned will be a suitable input for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details Args: table_numeric_values: Numeric values of a column value: Numeric value in the question Returns: A function key function to compare column and question values. 
""" if not table_numeric_values: return None all_values = list(table_numeric_values.values()) all_values.append(value) try: return get_numeric_sort_key_fn(all_values) except ValueError: return None def _get_numeric_relations(self, question, column_ids, row_ids, table): """ Returns numeric relations embeddings Args: question: Question object. column_ids: Maps word piece position to column id. row_ids: Maps word piece position to row id. table: The table containing the numeric cell values. """ numeric_relations = [0] * len(column_ids) # first, we add any numeric value spans to the question: # Create a dictionary that maps a table cell to the set of all relations # this cell has with any value in the question. cell_indices_to_relations = collections.defaultdict(set) if question is not None and table is not None: for numeric_value_span in question.numeric_spans: for value in numeric_value_span.values: for column_index in range(len(table.columns)): table_numeric_values = self._get_column_values(table, column_index) sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values, value) if sort_key_fn is None: continue for row_index, cell_value in table_numeric_values.items(): relation = get_numeric_relation(value, cell_value, sort_key_fn) if relation is not None: cell_indices_to_relations[column_index, row_index].add(relation) # For each cell add a special feature for all its word pieces. for (column_index, row_index), relations in cell_indices_to_relations.items(): relation_set_index = 0 for relation in relations: assert relation.value >= Relation.EQ.value relation_set_index += 2 ** (relation.value - Relation.EQ.value) for cell_token_index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index): numeric_relations[cell_token_index] = relation_set_index return numeric_relations def _get_numeric_values(self, table, column_ids, row_ids): """Returns numeric values for computation of answer loss.""" numeric_values = [float("nan")] * len(column_ids) if table is not None: num_rows = table.shape[0] num_columns = table.shape[1] for col_index in range(num_columns): for row_index in range(num_rows): numeric_value = table.iloc[row_index, col_index].numeric_value if numeric_value is not None: if numeric_value.float_value is None: continue float_value = numeric_value.float_value if float_value == float("inf"): continue for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index): numeric_values[index] = float_value return numeric_values def _get_numeric_values_scale(self, table, column_ids, row_ids): """Returns a scale to each token to down weigh the value of long words.""" numeric_values_scale = [1.0] * len(column_ids) if table is None: return numeric_values_scale num_rows = table.shape[0] num_columns = table.shape[1] for col_index in range(num_columns): for row_index in range(num_rows): indices = list(self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index)) num_indices = len(indices) if num_indices > 1: for index in indices: numeric_values_scale[index] = float(num_indices) return numeric_values_scale def _pad_to_seq_length(self, inputs): while len(inputs) > self.model_max_length: inputs.pop() while len(inputs) < self.model_max_length: inputs.append(0) def _get_all_answer_ids_from_coordinates( self, column_ids, row_ids, answers_list, ): """Maps lists of answer coordinates to token indexes.""" answer_ids = [0] * len(column_ids) found_answers = set() all_answers = set() for answers in answers_list: column_index, row_index = answers 
all_answers.add((column_index, row_index)) for index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index): found_answers.add((column_index, row_index)) answer_ids[index] = 1 missing_count = len(all_answers) - len(found_answers) return answer_ids, missing_count def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates): """ Maps answer coordinates of a question to token indexes. In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to (column, row) format before calling _get_all_answer_ids_from_coordinates. """ def _to_coordinates(answer_coordinates_question): return [(coords[1], coords[0]) for coords in answer_coordinates_question] return self._get_all_answer_ids_from_coordinates( column_ids, row_ids, answers_list=(_to_coordinates(answer_coordinates)) ) def _find_tokens(self, text, segment): """Return start index of segment in text or None.""" logging.info(f"text: {text} {segment}") for index in range(1 + len(text) - len(segment)): for seg_index, seg_token in enumerate(segment): if text[index + seg_index].piece != seg_token.piece: break else: return index return None def _find_answer_coordinates_from_answer_text( self, tokenized_table, answer_text, ): """Returns all occurrences of answer_text in the table.""" logging.info(f"answer text: {answer_text}") for row_index, row in enumerate(tokenized_table.rows): if row_index == 0: # We don't search for answers in the header. continue for col_index, cell in enumerate(row): token_index = self._find_tokens(cell, answer_text) if token_index is not None: yield TokenCoordinates( row_index=row_index, column_index=col_index, token_index=token_index, ) def _find_answer_ids_from_answer_texts( self, column_ids, row_ids, tokenized_table, answer_texts, ): """Maps question with answer texts to the first matching token indexes.""" answer_ids = [0] * len(column_ids) for answer_text in answer_texts: for coordinates in self._find_answer_coordinates_from_answer_text( tokenized_table, answer_text, ): # Maps answer coordinates to indexes this can fail if tokens / rows have # been pruned. 
indexes = list( self._get_cell_token_indexes( column_ids, row_ids, column_id=coordinates.column_index, row_id=coordinates.row_index - 1, ) ) indexes.sort() coordinate_answer_ids = [] if indexes: begin_index = coordinates.token_index + indexes[0] end_index = begin_index + len(answer_text) for index in indexes: if index >= begin_index and index < end_index: coordinate_answer_ids.append(index) if len(coordinate_answer_ids) == len(answer_text): for index in coordinate_answer_ids: answer_ids[index] = 1 break return answer_ids def _get_answer_ids(self, column_ids, row_ids, answer_coordinates): """Maps answer coordinates of a question to token indexes.""" answer_ids, missing_count = self._get_all_answer_ids(column_ids, row_ids, answer_coordinates) if missing_count: raise ValueError("Couldn't find all answers") return answer_ids def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question): if self.update_answer_coordinates: return self._find_answer_ids_from_answer_texts( column_ids, row_ids, tokenized_table, answer_texts=[self.tokenize(at) for at in answer_texts_question], ) return self._get_answer_ids(column_ids, row_ids, answer_coordinates_question) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if padding_strategy == PaddingStrategy.LONGEST: max_length = len(encoded_inputs["input_ids"]) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = ( padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length ) # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) if needs_to_be_padded: difference = max_length - len(encoded_inputs["input_ids"]) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [[self.pad_token_type_id] * 7] * difference ) if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [0] * difference if "numeric_values" in encoded_inputs: encoded_inputs["numeric_values"] = encoded_inputs["numeric_values"] + [float("nan")] * difference if "numeric_values_scale" in encoded_inputs: encoded_inputs["numeric_values_scale"] = ( encoded_inputs["numeric_values_scale"] + [1.0] * difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs[ "token_type_ids" ] if "labels" in encoded_inputs: encoded_inputs["labels"] = [0] * difference + encoded_inputs["labels"] if "numeric_values" in encoded_inputs: encoded_inputs["numeric_values"] = [float("nan")] * difference + encoded_inputs["numeric_values"] if "numeric_values_scale" in encoded_inputs: encoded_inputs["numeric_values_scale"] = [1.0] * difference + encoded_inputs[ "numeric_values_scale" ] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs # Everything related to converting logits to predictions def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids): for i, p in enumerate(probabilities): segment_id = segment_ids[i] col = column_ids[i] - 1 row = row_ids[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: yield i, p def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids): """Computes average probability per cell, aggregating over tokens.""" coords_to_probs = collections.defaultdict(list) for i, prob in self._get_cell_token_probs(probabilities, segment_ids, row_ids, column_ids): col = column_ids[i] - 1 row = row_ids[i] - 1 coords_to_probs[(col, row)].append(prob) return {coords: np.array(cell_probs).mean() for coords, cell_probs in coords_to_probs.items()} def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5): """ Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional aggregation indices. The original implementation, on which this function is based, can be found [here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288). Args: data (`dict`): Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`]. 
logits (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits at the token level. logits_agg (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*): Tensor containing the aggregation logits. cell_classification_threshold (`float`, *optional*, defaults to 0.5): Threshold to be used for cell selection. All table cells for which their probability is larger than this threshold will be selected. Returns: `tuple` comprising various elements depending on the inputs: - predicted_answer_coordinates (`List[List[[tuple]]` of length `batch_size`): Predicted answer coordinates as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index). - predicted_aggregation_indices (`List[int]`of length `batch_size`, *optional*, returned when `logits_aggregation` is provided): Predicted aggregation operator indices of the aggregation head. """ # converting to numpy arrays to work with PT/TF logits = logits.numpy() if logits_agg is not None: logits_agg = logits_agg.numpy() data = {key: value.numpy() for key, value in data.items() if key != "training"} # input data is of type float32 # np.log(np.finfo(np.float32).max) = 88.72284 # Any value over 88.72284 will overflow when passed through the exponential, sending a warning # We disable this warning by truncating the logits. logits[logits < -88.7] = -88.7 # Compute probabilities from token logits probabilities = 1 / (1 + np.exp(-logits)) * data["attention_mask"] token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] # collect input_ids, segment ids, row ids and column ids of batch. Shape (batch_size, seq_len) input_ids = data["input_ids"] segment_ids = data["token_type_ids"][:, :, token_types.index("segment_ids")] row_ids = data["token_type_ids"][:, :, token_types.index("row_ids")] column_ids = data["token_type_ids"][:, :, token_types.index("column_ids")] # next, get answer coordinates for every example in the batch num_batch = input_ids.shape[0] predicted_answer_coordinates = [] for i in range(num_batch): probabilities_example = probabilities[i].tolist() segment_ids_example = segment_ids[i] row_ids_example = row_ids[i] column_ids_example = column_ids[i] max_width = column_ids_example.max() max_height = row_ids_example.max() if max_width == 0 and max_height == 0: continue cell_coords_to_prob = self._get_mean_cell_probs( probabilities_example, segment_ids_example.tolist(), row_ids_example.tolist(), column_ids_example.tolist(), ) # Select the answers above the classification threshold. 
answer_coordinates = [] for col in range(max_width): for row in range(max_height): cell_prob = cell_coords_to_prob.get((col, row), None) if cell_prob is not None: if cell_prob > cell_classification_threshold: answer_coordinates.append((row, col)) answer_coordinates = sorted(answer_coordinates) predicted_answer_coordinates.append(answer_coordinates) output = (predicted_answer_coordinates,) if logits_agg is not None: predicted_aggregation_indices = logits_agg.argmax(axis=-1) output = (predicted_answer_coordinates, predicted_aggregation_indices.tolist()) return output # End of everything related to converting logits to predictions # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. 
Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # Below: utilities for TAPAS tokenizer (independent from PyTorch/Tensorflow). # This includes functions to parse numeric values (dates and numbers) from both the table and questions in order # to create the column_ranks, inv_column_ranks, numeric_values, numeric values_scale and numeric_relations in # prepare_for_model of TapasTokenizer. # These are meant to be used in an academic setup, for production use cases Gold mine or Aqua should be used. # taken from constants.py of the original implementation # URL: https://github.com/google-research/tapas/blob/master/tapas/utils/constants.py class Relation(enum.Enum): HEADER_TO_CELL = 1 # Connects header to cell. CELL_TO_HEADER = 2 # Connects cell to header. QUERY_TO_HEADER = 3 # Connects query to headers. QUERY_TO_CELL = 4 # Connects query to cells. ROW_TO_CELL = 5 # Connects row to cells. CELL_TO_ROW = 6 # Connects cells to row. EQ = 7 # Annotation value is same as cell value LT = 8 # Annotation value is less than cell value GT = 9 # Annotation value is greater than cell value @dataclass class Date: year: Optional[int] = None month: Optional[int] = None day: Optional[int] = None @dataclass class NumericValue: float_value: Optional[float] = None date: Optional[Date] = None @dataclass class NumericValueSpan: begin_index: int = None end_index: int = None values: List[NumericValue] = None @dataclass class Cell: text: Text numeric_value: Optional[NumericValue] = None @dataclass class Question: original_text: Text # The original raw question string. text: Text # The question string after normalization. numeric_spans: Optional[List[NumericValueSpan]] = None # Below: all functions from number_utils.py as well as 2 functions (namely get_all_spans and normalize_for_match) # from text_utils.py of the original implementation. URL's: # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_utils.py # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py # Constants for parsing date expressions. # Masks that specify (by a bool) which of (year, month, day) will be populated. _DateMask = collections.namedtuple("_DateMask", ["year", "month", "day"]) _YEAR = _DateMask(True, False, False) _YEAR_MONTH = _DateMask(True, True, False) _YEAR_MONTH_DAY = _DateMask(True, True, True) _MONTH = _DateMask(False, True, False) _MONTH_DAY = _DateMask(False, True, True) # Pairs of patterns to pass to 'datetime.strptime' and masks specifying which # fields will be set by the corresponding pattern. 
_DATE_PATTERNS = ( ("%B", _MONTH), ("%Y", _YEAR), ("%Ys", _YEAR), ("%b %Y", _YEAR_MONTH), ("%B %Y", _YEAR_MONTH), ("%B %d", _MONTH_DAY), ("%b %d", _MONTH_DAY), ("%d %b", _MONTH_DAY), ("%d %B", _MONTH_DAY), ("%B %d, %Y", _YEAR_MONTH_DAY), ("%d %B %Y", _YEAR_MONTH_DAY), ("%m-%d-%Y", _YEAR_MONTH_DAY), ("%Y-%m-%d", _YEAR_MONTH_DAY), ("%Y-%m", _YEAR_MONTH), ("%B %Y", _YEAR_MONTH), ("%d %b %Y", _YEAR_MONTH_DAY), ("%Y-%m-%d", _YEAR_MONTH_DAY), ("%b %d, %Y", _YEAR_MONTH_DAY), ("%d.%m.%Y", _YEAR_MONTH_DAY), ("%A, %b %d", _MONTH_DAY), ("%A, %B %d", _MONTH_DAY), ) # This mapping is used to convert date patterns to regex patterns. _FIELD_TO_REGEX = ( ("%A", r"\w+"), # Weekday as locale’s full name. ("%B", r"\w+"), # Month as locale’s full name. ("%Y", r"\d{4}"), # Year with century as a decimal number. ("%b", r"\w{3}"), # Month as locale’s abbreviated name. ("%d", r"\d{1,2}"), # Day of the month as a zero-padded decimal number. ("%m", r"\d{1,2}"), # Month as a zero-padded decimal number. ) def _process_date_pattern(dp): """Compute a regex for each date pattern to use as a prefilter.""" pattern, mask = dp regex = pattern regex = regex.replace(".", re.escape(".")) regex = regex.replace("-", re.escape("-")) regex = regex.replace(" ", r"\s+") for field, field_regex in _FIELD_TO_REGEX: regex = regex.replace(field, field_regex) # Make sure we didn't miss any of the fields. assert "%" not in regex, regex return pattern, mask, re.compile("^" + regex + "$") def _process_date_patterns(): return tuple(_process_date_pattern(dp) for dp in _DATE_PATTERNS) _PROCESSED_DATE_PATTERNS = _process_date_patterns() _MAX_DATE_NGRAM_SIZE = 5 # Following DynSp: # https://github.com/Microsoft/DynSP/blob/master/util.py#L414. _NUMBER_WORDS = [ "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", ] _ORDINAL_WORDS = [ "zeroth", "first", "second", "third", "fourth", "fith", "sixth", "seventh", "eighth", "ninth", "tenth", "eleventh", "twelfth", ] _ORDINAL_SUFFIXES = ["st", "nd", "rd", "th"] _NUMBER_PATTERN = re.compile(r"((^|\s)[+-])?((\.\d+)|(\d+(,\d\d\d)*(\.\d*)?))") # Following DynSp: # https://github.com/Microsoft/DynSP/blob/master/util.py#L293. _MIN_YEAR = 1700 _MAX_YEAR = 2016 _INF = float("INF") def _get_numeric_value_from_date(date, mask): """Converts date (datetime Python object) to a NumericValue object with a Date object value.""" if date.year < _MIN_YEAR or date.year > _MAX_YEAR: raise ValueError(f"Invalid year: {date.year}") new_date = Date() if mask.year: new_date.year = date.year if mask.month: new_date.month = date.month if mask.day: new_date.day = date.day return NumericValue(date=new_date) def _get_span_length_key(span): """Sorts span by decreasing length first and increasing first index second.""" return span[1] - span[0], -span[0] def _get_numeric_value_from_float(value): """Converts float (Python) to a NumericValue object with a float value.""" return NumericValue(float_value=value) # Doesn't parse ordinal expressions such as '18th of february 1655'. 
def _parse_date(text): """Attempts to format a text as a standard date string (yyyy-mm-dd).""" text = re.sub(r"Sept\b", "Sep", text) for in_pattern, mask, regex in _PROCESSED_DATE_PATTERNS: if not regex.match(text): continue try: date = datetime.datetime.strptime(text, in_pattern).date() except ValueError: continue try: return _get_numeric_value_from_date(date, mask) except ValueError: continue return None def _parse_number(text): """Parses simple cardinal and ordinals numbers.""" for suffix in _ORDINAL_SUFFIXES: if text.endswith(suffix): text = text[: -len(suffix)] break text = text.replace(",", "") try: value = float(text) except ValueError: return None if math.isnan(value): return None if value == _INF: return None return value def get_all_spans(text, max_ngram_length): """ Split a text into all possible ngrams up to 'max_ngram_length'. Split points are white space and punctuation. Args: text: Text to split. max_ngram_length: maximal ngram length. Yields: Spans, tuples of begin-end index. """ start_indexes = [] for index, char in enumerate(text): if not char.isalnum(): continue if index == 0 or not text[index - 1].isalnum(): start_indexes.append(index) if index + 1 == len(text) or not text[index + 1].isalnum(): for start_index in start_indexes[-max_ngram_length:]: yield start_index, index + 1 def normalize_for_match(text): return " ".join(text.lower().split()) def format_text(text): """Lowercases and strips punctuation.""" text = text.lower().strip() if text == "n/a" or text == "?" or text == "nan": text = EMPTY_TEXT text = re.sub(r"[^\w\d]+", " ", text).replace("_", " ") text = " ".join(text.split()) text = text.strip() if text: return text return EMPTY_TEXT def parse_text(text): """ Extracts longest number and date spans. Args: text: text to annotate Returns: List of longest numeric value spans. 
""" span_dict = collections.defaultdict(list) for match in _NUMBER_PATTERN.finditer(text): span_text = text[match.start() : match.end()] number = _parse_number(span_text) if number is not None: span_dict[match.span()].append(_get_numeric_value_from_float(number)) for begin_index, end_index in get_all_spans(text, max_ngram_length=1): if (begin_index, end_index) in span_dict: continue span_text = text[begin_index:end_index] number = _parse_number(span_text) if number is not None: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number)) for number, word in enumerate(_NUMBER_WORDS): if span_text == word: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number))) break for number, word in enumerate(_ORDINAL_WORDS): if span_text == word: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number))) break for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE): span_text = text[begin_index:end_index] date = _parse_date(span_text) if date is not None: span_dict[begin_index, end_index].append(date) spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True) selected_spans = [] for span, value in spans: for selected_span, _ in selected_spans: if selected_span[0] <= span[0] and span[1] <= selected_span[1]: break else: selected_spans.append((span, value)) selected_spans.sort(key=lambda span_value: span_value[0][0]) numeric_value_spans = [] for span, values in selected_spans: numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values)) return numeric_value_spans # Below: all functions from number_annotation_utils.py and 2 functions (namely filter_invalid_unicode # and filter_invalid_unicode_from_table) from text_utils.py of the original implementation. URL's: # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_annotation_utils.py # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py _PrimitiveNumericValue = Union[float, Tuple[Optional[float], Optional[float], Optional[float]]] _SortKeyFn = Callable[[NumericValue], Tuple[float, Ellipsis]] _DATE_TUPLE_SIZE = 3 EMPTY_TEXT = "EMPTY" NUMBER_TYPE = "number" DATE_TYPE = "date" def _get_value_type(numeric_value): if numeric_value.float_value is not None: return NUMBER_TYPE elif numeric_value.date is not None: return DATE_TYPE raise ValueError(f"Unknown type: {numeric_value}") def _get_value_as_primitive_value(numeric_value): """Maps a NumericValue proto to a float or tuple of float.""" if numeric_value.float_value is not None: return numeric_value.float_value if numeric_value.date is not None: date = numeric_value.date value_tuple = [None, None, None] # All dates fields are cased to float to produce a simple primitive value. if date.year is not None: value_tuple[0] = float(date.year) if date.month is not None: value_tuple[1] = float(date.month) if date.day is not None: value_tuple[2] = float(date.day) return tuple(value_tuple) raise ValueError(f"Unknown type: {numeric_value}") def _get_all_types(numeric_values): return {_get_value_type(value) for value in numeric_values} def get_numeric_sort_key_fn(numeric_values): """ Creates a function that can be used as a sort key or to compare the values. Maps to primitive types and finds the biggest common subset. Consider the values "05/05/2010" and "August 2007". With the corresponding primitive values (2010.,5.,5.) and (2007.,8., None). 
These values can be compared by year and date so we map to the sequence (2010., 5.), (2007., 8.). If we added a third value "2006" with primitive value (2006., None, None), we could only compare by the year so we would map to (2010.,), (2007.,) and (2006.,). Args: numeric_values: Values to compare Returns: A function that can be used as a sort key function (mapping numeric values to a comparable tuple) Raises: ValueError if values don't have a common type or are not comparable. """ value_types = _get_all_types(numeric_values) if len(value_types) != 1: raise ValueError(f"No common value type in {numeric_values}") value_type = next(iter(value_types)) if value_type == NUMBER_TYPE: # Primitive values are simple floats, nothing to do here. return _get_value_as_primitive_value # The type can only be Date at this point which means the primitive type # is a float triple. valid_indexes = set(range(_DATE_TUPLE_SIZE)) for numeric_value in numeric_values: value = _get_value_as_primitive_value(numeric_value) assert isinstance(value, tuple) for tuple_index, inner_value in enumerate(value): if inner_value is None: valid_indexes.discard(tuple_index) if not valid_indexes: raise ValueError(f"No common value in {numeric_values}") def _sort_key_fn(numeric_value): value = _get_value_as_primitive_value(numeric_value) return tuple(value[index] for index in valid_indexes) return _sort_key_fn def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info): """ Finds the most common numeric values in a column and returns them Args: row_index_to_values: For each row index all the values in that cell. min_consolidation_fraction: Fraction of cells that need to have consolidated value. debug_info: Additional information only used for logging Returns: For each row index the first value that matches the most common value. Rows that don't have a matching value are dropped. Empty list if values can't be consolidated. """ type_counts = collections.Counter() for numeric_values in row_index_to_values.values(): type_counts.update(_get_all_types(numeric_values)) if not type_counts: return {} max_count = max(type_counts.values()) if max_count < len(row_index_to_values) * min_consolidation_fraction: # logging.log_every_n(logging.INFO, f'Can\'t consolidate types: {debug_info} {row_index_to_values} {max_count}', 100) return {} valid_types = set() for value_type, count in type_counts.items(): if count == max_count: valid_types.add(value_type) if len(valid_types) > 1: assert DATE_TYPE in valid_types max_type = DATE_TYPE else: max_type = next(iter(valid_types)) new_row_index_to_value = {} for index, values in row_index_to_values.items(): # Extract the first matching value. for value in values: if _get_value_type(value) == max_type: new_row_index_to_value[index] = value break return new_row_index_to_value def _get_numeric_values(text): """Parses text and returns numeric values.""" numeric_spans = parse_text(text) return itertools.chain(*(span.values for span in numeric_spans)) def _get_column_values(table, col_index): """ Parses text in column and returns a dict mapping row_index to values. 
This is the _get_column_values function from number_annotation_utils.py of the original implementation Args: table: Pandas dataframe col_index: integer, indicating the index of the column to get the numeric values of """ index_to_values = {} for row_index, row in table.iterrows(): text = normalize_for_match(row[col_index].text) index_to_values[row_index] = list(_get_numeric_values(text)) return index_to_values def get_numeric_relation(value, other_value, sort_key_fn): """Compares two values and returns their relation or None.""" value = sort_key_fn(value) other_value = sort_key_fn(other_value) if value == other_value: return Relation.EQ if value < other_value: return Relation.LT if value > other_value: return Relation.GT return None def add_numeric_values_to_question(question): """Adds numeric value spans to a question.""" original_text = question question = normalize_for_match(question) numeric_spans = parse_text(question) return Question(original_text=original_text, text=question, numeric_spans=numeric_spans) def filter_invalid_unicode(text): """Return an empty string and True if 'text' is in invalid unicode.""" return ("", True) if isinstance(text, bytes) else (text, False) def filter_invalid_unicode_from_table(table): """ Removes invalid unicode from table. Checks whether a table cell text contains an invalid unicode encoding. If yes, reset the table cell text to an empty str and log a warning for each invalid cell Args: table: table to clean. """ # to do: add table id support if not hasattr(table, "table_id"): table.table_id = 0 for row_index, row in table.iterrows(): for col_index, cell in enumerate(row): cell, is_invalid = filter_invalid_unicode(cell) if is_invalid: logging.warning( f"Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, " f"col_index: {col_index}", ) for col_index, column in enumerate(table.columns): column, is_invalid = filter_invalid_unicode(column) if is_invalid: logging.warning(f"Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}") def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None): """ Parses text in table column-wise and adds the consolidated values. Consolidation refers to finding values with a common types (date or number) Args: table: Table to annotate. min_consolidation_fraction: Fraction of cells in a column that need to have consolidated value. debug_info: Additional information used for logging. """ table = table.copy() # First, filter table on invalid unicode filter_invalid_unicode_from_table(table) # Second, replace cell values by Cell objects for row_index, row in table.iterrows(): for col_index, cell in enumerate(row): table.iloc[row_index, col_index] = Cell(text=cell) # Third, add numeric_value attributes to these Cell objects for col_index, column in enumerate(table.columns): column_values = _consolidate_numeric_values( _get_column_values(table, col_index), min_consolidation_fraction=min_consolidation_fraction, debug_info=(debug_info, column), ) for row_index, numeric_value in column_values.items(): table.iloc[row_index, col_index].numeric_value = numeric_value return table
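# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module, and never invoked on
# import): a quick demonstration of the numeric-parsing helpers defined above.
# `parse_text` extracts number/date spans from free text, and
# `add_numeric_table_values` replaces the cells of a pandas table with `Cell`
# objects carrying consolidated `NumericValue`s. The example data is made up.
def _numeric_parsing_demo():
    import pandas as pd

    # "53" is parsed as a float value and "august 2007" as a Date(year=2007, month=8);
    # the shorter "2007" number span is dropped because it is contained in the date span.
    for span in parse_text("she scored 53 points in august 2007"):
        print(span.begin_index, span.end_index, span.values)

    # Cell values must already be strings; after annotation each cell is a `Cell`
    # whose `numeric_value` holds the consolidated parsed value for its column.
    table = pd.DataFrame({"Year": ["2005", "2007", "2006"]}).astype(str)
    annotated = add_numeric_table_values(table)
    print(annotated.iloc[0, 0].numeric_value)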
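# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch (not part of the original module): it drives the
# `encode_plus` path above and then maps model logits back to cell coordinates
# with `convert_logits_to_predictions`. The checkpoint name is an assumption
# (any TAPAS question-answering checkpoint should do); requires pandas and torch.
if __name__ == "__main__":
    import pandas as pd
    import torch

    from transformers import TapasForQuestionAnswering, TapasTokenizer

    checkpoint = "google/tapas-base-finetuned-wtq"  # assumed public checkpoint
    tokenizer = TapasTokenizer.from_pretrained(checkpoint)
    model = TapasForQuestionAnswering.from_pretrained(checkpoint)

    # All cell values must be text (see the `table` argument docstring above).
    table = pd.DataFrame(
        {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}
    ).astype(str)
    query = "How many movies has George Clooney played in?"

    # Builds input_ids, attention_mask and the 7 TAPAS-specific token type id planes.
    inputs = tokenizer.encode_plus(
        table=table, query=query, padding="max_length", truncation=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    # Convert token-level logits back into (row, column) answer coordinates and an
    # aggregation operator index per example.
    coordinates, aggregation_indices = tokenizer.convert_logits_to_predictions(
        inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
    )
    print(coordinates, aggregation_indices)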
transformers/src/transformers/models/tapas/tokenization_tapas.py/0
{ "file_path": "transformers/src/transformers/models/tapas/tokenization_tapas.py", "repo_id": "transformers", "token_count": 53968 }
366
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available, ) _import_structure = { "configuration_tvlt": ["TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig"], "feature_extraction_tvlt": ["TvltFeatureExtractor"], "processing_tvlt": ["TvltProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tvlt"] = [ "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", "TvltModel", "TvltForPreTraining", "TvltForAudioVisualClassification", "TvltPreTrainedModel", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_tvlt"] = ["TvltImageProcessor"] if TYPE_CHECKING: from .configuration_tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig from .processing_tvlt import TvltProcessor from .feature_extraction_tvlt import TvltFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tvlt import ( TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_tvlt import TvltImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
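# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): because `sys.modules` is
# swapped for a `_LazyModule` above, the submodules are only imported on first
# attribute access, and backend-specific names fail with a clear error if
# torch / vision are missing. An assumed usage pattern:
#
#     from transformers.models.tvlt import TvltConfig, TvltProcessor  # resolved lazily, no torch/vision needed
#     config = TvltConfig()
#     # `TvltModel` additionally needs torch, `TvltImageProcessor` needs Pillow:
#     # from transformers.models.tvlt import TvltModel, TvltImageProcessor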
transformers/src/transformers/models/tvlt/__init__.py/0
{ "file_path": "transformers/src/transformers/models/tvlt/__init__.py", "repo_id": "transformers", "token_count": 1013 }
367
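Because the `__init__.py` above only fills `_import_structure`, the heavy submodules are loaded lazily. A minimal sketch of that behaviour, assuming a `transformers` build that still ships the TVLT subpackage (it was later deprecated), could look like this:

```python
# Attribute access on the lazy module triggers the real import of the submodule
# that defines the requested object; nothing else is loaded up front.
from transformers.models import tvlt

config = tvlt.TvltConfig()                 # imports configuration_tvlt on first access
extractor_cls = tvlt.TvltFeatureExtractor  # imports feature_extraction_tvlt

print(config.model_type, extractor_cls.__name__)
```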
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for UDOP model.""" import os import re import warnings from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import ( AddedToken, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" UDOP_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). 
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/spiece.model", }, "tokenizer_file": { "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/tokenizer.json", }, } class UdopTokenizer(PreTrainedTokenizer): """ Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. legacy (`bool`, *optional*, defaults to `True`): Whether or not the `legacy` behaviour of the tokenizer should be used. 
Legacy is before the merge of #24622 which includes fixes to properly handle tokens that appear after special tokens. A simple example: - `legacy=True`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True) >>> tokenizer.encode("Hello <extra_id_0>.") [8774, 32099, 3, 5, 1] ``` - `legacy=False`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False) >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here [8774, 32099, 5, 1] ``` Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", sep_token="</s>", pad_token="<pad>", sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=True, add_prefix_space=True, **kwargs, ) -> None: eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token self.legacy = legacy self.add_prefix_space = add_prefix_space self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) # additional properties self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__( eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, add_prefix_space=add_prefix_space, **kwargs, ) @property def vocab_size(self): return len(self.sp_model) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_vocab def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. 
token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_tokens def get_sentinel_tokens(self): return list( set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens)) ) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_token_ids def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._add_eos_if_not_present def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." ) return token_ids else: return token_ids + [self.eos_token_id] # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
""" token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__getstate__ def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize def tokenize(self, text: "TextInput", **kwargs) -> List[str]: """ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. """ if self.legacy or len(text) == 0: return super().tokenize(text, **kwargs) text = text.replace(SPIECE_UNDERLINE, " ") if self.add_prefix_space: text = SPIECE_UNDERLINE + text tokens = super().tokenize(text, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize def _tokenize(self, text, **kwargs): """ Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ tokens = self.sp_model.encode(text, out_type=str) if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")): return tokens # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. 
Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] ] = None, **kwargs, ) -> BatchEncoding: if text is None and text_target is None: raise ValueError("You need to specify either `text` or `text_target`.") if text is not None: # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the # input mode in this case. 
if not self._in_target_context_manager: self._switch_to_input_mode() encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs) if text_target is not None: self._switch_to_target_mode() target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs) # Leave back tokenizer in input mode self._switch_to_input_mode() if text_target is None: return encodings elif text is None: return target_encodings else: encodings["labels"] = target_encodings["input_ids"] return encodings def call_boxes( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." 
) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus_boxes( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus_boxes( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def batch_encode_plus_boxes( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. Args: batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`): Batch of sequences or pair of sequences to be encoded. 
This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus_boxes( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def encode_boxes( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> List[int]: """ Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `List[str]` or `List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
""" encoded_inputs = self.encode_plus_boxes( text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] def encode_plus_boxes( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `List[str]` or `List[int]` (the latter only for not-fast tokenizers)): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus_boxes( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus_boxes( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) batch_outputs = self._batch_prepare_for_model_boxes( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def _batch_prepare_for_model_boxes( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model_boxes( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def _encode_plus_boxes( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = 
TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. " "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model_boxes( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def prepare_for_model_boxes( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs # Copied from transformers.models.layoutxlm.tokenization_layoutxlm.LayoutXLMTokenizer.truncate_sequences def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_second'." 
) elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) # Copied from transformers.models.layoutxlm.tokenization_layoutxlm.LayoutXLMTokenizer._pad def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs
transformers/src/transformers/models/udop/tokenization_udop.py/0
{ "file_path": "transformers/src/transformers/models/udop/tokenization_udop.py", "repo_id": "transformers", "token_count": 32238 }
368
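A short, hedged usage sketch for the slow UDOP tokenizer defined above: it assumes `sentencepiece` is installed and that the `microsoft/udop-large` checkpoint referenced in `PRETRAINED_VOCAB_FILES_MAP` can be downloaded; the word and box values are made up for illustration.

```python
from transformers import UdopTokenizer

tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")

# One 0-1000 normalized bounding box per word, as required by call_boxes().
words = ["hello", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 120, 30]]

encoding = tokenizer(words, boxes=boxes)

# Word-level boxes are expanded to one bbox entry per sub-token, plus the sep box.
print(encoding["input_ids"])
print(encoding["bbox"])
```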
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UnivNetModel model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "dg845/univnet-dev": "https://huggingface.co/dg845/univnet-dev/resolve/main/config.json", } class UnivNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UnivNet [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32' architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_channels (`int`, *optional*, defaults to 64): The number of input channels for the UnivNet residual network. This should correspond to `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class. model_hidden_channels (`int`, *optional*, defaults to 32): The number of hidden channels of each residual block in the UnivNet residual network. num_mel_bins (`int`, *optional*, defaults to 100): The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value used in the [`UnivNetFeatureExtractor`] class. resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 3, 3]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of `resblock_stride_sizes` and `resblock_dilation_sizes`. resblock_stride_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 4]`): A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and `resblock_dilation_sizes`. resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the UnivNet residual network. The length of `resblock_dilation_sizes` should match that of `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in `resblock_dilation_sizes` defines the number of convolutional layers per resnet block. 
kernel_predictor_num_blocks (`int`, *optional*, defaults to 3): The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for each location variable convolution layer in the UnivNet residual network. kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64): The number of hidden channels for each residual block in the kernel predictor network. kernel_predictor_conv_size (`int`, *optional*, defaults to 3): The kernel size of each 1D convolutional layer in the kernel predictor network. kernel_predictor_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for each residual block in the kernel predictor network. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.2): The angle of the negative slope used by the leaky ReLU activation. Example: ```python >>> from transformers import UnivNetModel, UnivNetConfig >>> # Initializing a UnivNet dg845/univnet-dev style configuration >>> configuration = UnivNetConfig() >>> # Initializing a model (with random weights) from the dg845/univnet-dev style configuration >>> model = UnivNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "univnet" def __init__( self, model_in_channels=64, model_hidden_channels=32, num_mel_bins=100, resblock_kernel_sizes=[3, 3, 3], resblock_stride_sizes=[8, 8, 4], resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]], kernel_predictor_num_blocks=3, kernel_predictor_hidden_channels=64, kernel_predictor_conv_size=3, kernel_predictor_dropout=0.0, initializer_range=0.01, leaky_relu_slope=0.2, **kwargs, ): if not (len(resblock_kernel_sizes) == len(resblock_stride_sizes) == len(resblock_dilation_sizes)): raise ValueError( "`resblock_kernel_sizes`, `resblock_stride_sizes`, and `resblock_dilation_sizes` must all have the" " same length (which will be the number of resnet blocks in the model)." ) self.model_in_channels = model_in_channels self.model_hidden_channels = model_hidden_channels self.num_mel_bins = num_mel_bins self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_stride_sizes = resblock_stride_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.kernel_predictor_num_blocks = kernel_predictor_num_blocks self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels self.kernel_predictor_conv_size = kernel_predictor_conv_size self.kernel_predictor_dropout = kernel_predictor_dropout self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope super().__init__(**kwargs)
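# Editor's sketch (hedged, not part of the original module): minimal usage of the configuration
# defined above, assuming the installed `transformers` package exposes `UnivNetModel` as the
# docstring states and that PyTorch is available.
if __name__ == "__main__":
    from transformers import UnivNetModel

    config = UnivNetConfig(model_in_channels=64, num_mel_bins=100)  # values mirror the documented defaults
    model = UnivNetModel(config)  # vocoder instantiated with random weights
    print(config.resblock_kernel_sizes)  # [3, 3, 3] -> three resnet blocks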
transformers/src/transformers/models/univnet/configuration_univnet.py/0
{ "file_path": "transformers/src/transformers/models/univnet/configuration_univnet.py", "repo_id": "transformers", "token_count": 2525 }
369
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VilT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) VILT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "dandelin/vilt-b32-mlm": "https://huggingface.co/dandelin/vilt-b32-mlm/blob/main/config.json" } class ViltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViLT [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the text part of the model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ViltModel`]. type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding text. modality_type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the embeddings of the text and image modalities. max_position_embeddings (`int`, *optional*, defaults to 40): The maximum sequence length that this model might ever be used with. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. max_image_length (`int`, *optional*, defaults to -1): The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer, the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into account. num_images (`int`, *optional*, defaults to -1): The number of images to use for natural language visual reasoning. If set to a positive integer, will be used by [`ViltForImagesAndTextClassification`] for defining the classifier head. Example: ```python >>> from transformers import ViltModel, ViltConfig >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration >>> configuration = ViltConfig() >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration >>> model = ViltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vilt" def __init__( self, vocab_size=30522, type_vocab_size=2, modality_type_vocab_size=2, max_position_embeddings=40, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=32, num_channels=3, qkv_bias=True, max_image_length=-1, tie_word_embeddings=False, num_images=-1, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.modality_type_vocab_size = modality_type_vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.max_image_length = max_image_length self.num_images = num_images
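# Editor's sketch (hedged, not part of the original module): minimal usage of the configuration
# defined above, assuming `ViltModel` is importable from the installed `transformers` package
# and PyTorch is available.
if __name__ == "__main__":
    from transformers import ViltModel

    config = ViltConfig(image_size=384, patch_size=32)  # mirrors the documented defaults
    model = ViltModel(config)  # randomly initialized ViLT encoder
    print(config.hidden_size, config.max_position_embeddings)  # 768 40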
transformers/src/transformers/models/vilt/configuration_vilt.py/0
{ "file_path": "transformers/src/transformers/models/vilt/configuration_vilt.py", "repo_id": "transformers", "token_count": 2640 }
370
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VisionTextDualEncoder model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig from ..chinese_clip.configuration_chinese_clip import ChineseCLIPVisionConfig from ..clip.configuration_clip import CLIPVisionConfig from ..siglip.configuration_siglip import SiglipVisionConfig logger = logging.get_logger(__name__) VISION_MODEL_CONFIGS = { "clip_vision_model": CLIPVisionConfig, "chinese_clip_vision_model": ChineseCLIPVisionConfig, "siglip_vision_model": SiglipVisionConfig, } class VisionTextDualEncoderConfig(PretrainedConfig): r""" [`VisionTextDualEncoderConfig`] is the configuration class to store the configuration of a [`VisionTextDualEncoderModel`]. It is used to instantiate a [`VisionTextDualEncoderModel`] model according to the specified arguments, defining the text model and vision model configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation. kwargs (*optional*): Dictionary of keyword arguments.
Examples: ```python >>> from transformers import ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel >>> # Initializing a BERT and ViT configuration >>> config_vision = ViTConfig() >>> config_text = BertConfig() >>> config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=512) >>> # Initializing a BERT and ViT model (with random weights) >>> model = VisionTextDualEncoderModel(config=config) >>> # Accessing the model configuration >>> config_vision = model.config.vision_config >>> config_text = model.config.text_config >>> # Saving the model, including its configuration >>> model.save_pretrained("vit-bert") >>> # loading model and config from pretrained folder >>> vision_text_config = VisionTextDualEncoderConfig.from_pretrained("vit-bert") >>> model = VisionTextDualEncoderModel.from_pretrained("vit-bert", config=vision_text_config) ```""" model_type = "vision-text-dual-encoder" is_composition = True def __init__(self, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): super().__init__(**kwargs) if "vision_config" not in kwargs: raise ValueError("`vision_config` can not be `None`.") if "text_config" not in kwargs: raise ValueError("`text_config` can not be `None`.") vision_config = kwargs.pop("vision_config") text_config = kwargs.pop("text_config") vision_model_type = vision_config.pop("model_type") text_model_type = text_config.pop("model_type") vision_config_class = VISION_MODEL_CONFIGS.get(vision_model_type) if vision_config_class is not None: self.vision_config = vision_config_class(**vision_config) else: self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) if hasattr(self.vision_config, "vision_config"): self.vision_config = self.vision_config.vision_config self.text_config = AutoConfig.for_model(text_model_type, **text_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value @classmethod def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs): r""" Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision model configuration. Returns: [`VisionTextDualEncoderConfig`]: An instance of a configuration object """ return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
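# Editor's sketch (hedged, not part of the original module): building a composite config from two
# sub-configs with the classmethod defined above; `ViTConfig` and `BertConfig` are assumed to be
# importable from the installed `transformers` package, as in the docstring example.
if __name__ == "__main__":
    from transformers import BertConfig, ViTConfig

    config = VisionTextDualEncoderConfig.from_vision_text_configs(
        vision_config=ViTConfig(), text_config=BertConfig(), projection_dim=512
    )
    print(config.vision_config.model_type, config.text_config.model_type)  # vit bert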
transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 1662 }
371
# coding=utf-8 # Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ViT model.""" from __future__ import annotations import collections.abc import math from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_vit import ViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ViTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "google/vit-base-patch16-224-in21k" _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/vit-base-patch16-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" class TFViTEmbeddings(keras.layers.Layer): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = TFViTPatchEmbeddings(config, name="patch_embeddings") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def build(self, input_shape=None): num_patches = self.patch_embeddings.num_patches self.cls_token = self.add_weight( shape=(1, 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="cls_token", ) self.position_embeddings = self.add_weight( shape=(1, num_patches + 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="position_embeddings", ) if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ batch_size, seq_len, dim = shape_list(embeddings) num_patches = seq_len - 1 _, num_positions, _ = shape_list(self.position_embeddings) num_positions -= 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] h0 = height // self.config.patch_size w0 = width // self.config.patch_size patch_pos_embed = tf.image.resize( images=tf.reshape( patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) ), size=(h0, w0), method="bicubic", ) shape = shape_list(patch_pos_embed) assert h0 == shape[-3] and w0 == shape[-2] patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) return tf.concat(values=(class_pos_embed, patch_pos_embed), axis=1) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) embeddings = self.patch_embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training ) # add the [CLS] token to the embedded patch tokens cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0) embeddings = tf.concat((cls_tokens, embeddings), axis=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings, training=training) return embeddings # Based on timm implementation, which can be found here: # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py class TFViTPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.num_channels = num_channels self.config = config self.projection = keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(self.config.initializer_range), bias_initializer="zeros", name="projection", ) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) if not interpolate_pos_encoding: if tf.executing_eagerly(): if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. # shape = (batch_size, num_patches, out_channels=embed_dim) num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFViTSelfAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) mixed_key_layer = self.key(inputs=hidden_states) mixed_value_layer = self.value(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
# (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFViTSelfOutput(keras.layers.Layer): """ The residual connection is defined in TFViTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFViTSelfAttention(config, name="attention") self.dense_output = TFViTSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: 
with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFViTIntermediate(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTOutput(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFViTLayer(keras.layers.Layer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.attention = TFViTAttention(config, name="attention") self.intermediate = TFViTIntermediate(config, name="intermediate") self.vit_output = TFViTOutput(config, name="output") self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") self.config = config def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: attention_outputs = self.attention( # in ViT, layernorm is applied before self-attention input_tensor=self.layernorm_before(inputs=hidden_states), head_mask=head_mask, output_attentions=output_attentions, training=training, ) attention_output = attention_outputs[0] # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(inputs=hidden_states) intermediate_output = self.intermediate(hidden_states=layer_output) # second residual connection is done here layer_output = self.vit_output( hidden_states=intermediate_output, input_tensor=hidden_states, training=training ) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with 
tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "vit_output", None) is not None: with tf.name_scope(self.vit_output.name): self.vit_output.build(None) if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.config.hidden_size]) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.config.hidden_size]) class TFViTEncoder(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.layer = [TFViTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, head_mask=head_mask[i], output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFViTMainLayer(keras.layers.Layer): config_class = ViTConfig def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFViTEmbeddings(config, name="embeddings") self.encoder = TFViTEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.pooler = TFViTPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training, ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(inputs=sequence_output) pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.hidden_size]) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFViTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" VIT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`ViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.", VIT_START_DOCSTRING, ) class TFViTModel(TFViTPreTrainedModel): def __init__(self, config: ViTConfig, *inputs, add_pooling_layer=True, **kwargs): super().__init__(config, *inputs, **kwargs) self.vit = TFViTMainLayer(config, add_pooling_layer=add_pooling_layer, name="vit") @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) class TFViTPooler(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
</Tip> """, VIT_START_DOCSTRING, ) class TFViTForImageClassification(TFViTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ViTConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.vit = TFViTMainLayer(config, add_pooling_layer=False, name="vit") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(inputs=sequence_output[:, 0, :]) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
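# Editor's sketch (hedged, not part of the original module): a minimal forward pass through the
# classification head defined above, assuming TensorFlow is installed; the random tensor merely
# stands in for pixel values that would normally come from `ViTImageProcessor`.
if __name__ == "__main__":
    model = TFViTForImageClassification(ViTConfig(num_labels=3))  # random weights, 3-way head
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # (batch, channels, height, width)
    outputs = model(pixel_values)
    print(outputs.logits.shape)  # (1, 3)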
transformers/src/transformers/models/vit/modeling_tf_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/modeling_tf_vit.py", "repo_id": "transformers", "token_count": 15720 }
372
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Vivit.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def make_batched(videos) -> List[List[ImageInput]]: if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(videos): return [[videos]] raise ValueError(f"Could not make batched video from {videos}") class VivitImageProcessor(BaseImageProcessor): r""" Constructs a Vivit image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/127.5`): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. offset (`bool`, *optional*, defaults to `True`): Whether to scale the image in both negative and positive directions. 
Can be overriden by the `offset` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 127.5, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 256} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.offset = offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self._valid_processor_keys = [ "videos", "do_resize", "size", "resample", "do_center_crop", "crop_size", "do_rescale", "rescale_factor", "offset", "do_normalize", "image_mean", "image_std", "return_tensors", "data_format", "input_data_format", ] def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size: output_size = get_resize_output_image_size( image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) elif "height" in size and "width" in size: output_size = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}") return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor.rescale def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is 1/127.5, the image is rescaled between [-1, 1]. image = image * scale - 1 If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1]. image = image * scale Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ rescaled_image = rescale( image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs ) if offset: rescaled_image = rescaled_image - 1 return rescaled_image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True.") # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, offset=offset, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess( self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: videos (`ImageInput`): Video frames to preprocess. Expects a single or batch of video frames with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): Whether to centre crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the centre crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between `[-1 - 1]` if `offset` is `True`, `[0, 1]` otherwise. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. offset (`bool`, *optional*, defaults to `self.offset`): Whether to scale the image in both negative and positive directions. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor offset = offset if offset is not None else self.offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(videos): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) videos = make_batched(videos) videos = [ [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] for video in videos ] data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors)
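The `preprocess` method above is the public entry point of this image processor. Below is a minimal usage sketch, assuming synthetic `uint8` frames and the processor's default settings; the frame count, frame size, and printed output shape are illustrative, not taken from this file.

```python
import numpy as np

from transformers import VivitImageProcessor

# 32 synthetic RGB frames of one video, pixel values in [0, 255]
video = [np.random.randint(0, 256, size=(360, 640, 3), dtype=np.uint8) for _ in range(32)]

processor = VivitImageProcessor()  # or VivitImageProcessor.from_pretrained(<checkpoint>)

# With the defaults this resizes the shortest edge, center-crops, rescales with the
# offset logic documented above (values end up in [-1, 1]), normalizes, and stacks
# the frames into `pixel_values` of shape (batch_size, num_frames, channels, height, width).
inputs = processor.preprocess(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # e.g. (1, 32, 3, 224, 224)
```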
transformers/src/transformers/models/vivit/image_processing_vivit.py/0
{ "file_path": "transformers/src/transformers/models/vivit/image_processing_vivit.py", "repo_id": "transformers", "token_count": 8459 }
373
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Wav2Vec2-BERT """ import warnings from ...processing_utils import ProcessorMixin from ..seamless_m4t.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer class Wav2Vec2BertProcessor(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2Processor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2Processor.__call__`] and [`~Wav2Vec2Processor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): try: return super().from_pretrained(pretrained_model_name_or_path, **kwargs) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", FutureWarning, ) feature_extractor = SeamlessM4TFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__(self, audio=None, text=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequences(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. 
Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model when `audio` is not `None`. When only `text` is specified, returns the token attention mask. - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not `None`. - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio` is `None`. """ sampling_rate = kwargs.pop("sampling_rate", None) if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, input_features=None, labels=None, **kwargs): """ If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. If `labels` is not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the doctsring of the above two methods for more information. """ if input_features is None and labels is None: raise ValueError("You need to specify either an `input_features` or `labels` input to pad.") if input_features is not None: input_features = self.feature_extractor.pad(input_features, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
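As a usage sketch for the processor defined above: the checkpoint name below is an assumption for illustration (any Wav2Vec2-BERT repo that bundles a `SeamlessM4TFeatureExtractor` and a CTC tokenizer would work), and the one-second silent waveform is synthetic.

```python
import numpy as np

from transformers import Wav2Vec2BertProcessor

# Hypothetical fine-tuned checkpoint shipping both a feature extractor and a tokenizer.
processor = Wav2Vec2BertProcessor.from_pretrained("hf-audio/wav2vec2-bert-CV16-en")

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
text = "a transcription used as the label"

# Passing both `audio` and `text` returns the audio features and, as described in
# `__call__` above, adds the tokenized text under `labels`.
inputs = processor(audio=audio, text=text, sampling_rate=16000, return_tensors="pt")
print(sorted(inputs.keys()))
```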
transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 2889 }
374
#!/usr/bin/env python """Converts a Whisper model in OpenAI format to Hugging Face format.""" # Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import io import json import os import tempfile import urllib import warnings from typing import Any, Optional, Tuple import torch from huggingface_hub.utils import insecure_hashlib from torch import nn from tqdm import tqdm from transformers import ( GenerationConfig, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizer, WhisperTokenizerFast, ) from transformers.models.whisper.tokenization_whisper import LANGUAGES, bytes_to_unicode from transformers.utils.import_utils import _is_package_available _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", } _TOKENIZERS = { "multilingual": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/multilingual.tiktoken", "english": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/gpt2.tiktoken", } def _get_generation_config( is_multilingual: bool, num_languages: int = 100, openai_version: Optional[str] = None, ) -> GenerationConfig: """ Loads the appropriate generation config from HF repo """ if openai_version is not None: repo = f"openai/whisper-{openai_version}" elif not is_multilingual: repo = "openai/whisper-medium.en" elif num_languages < 100: repo = "openai/whisper-large-v2" else: repo = "openai/whisper-large-v3" gen_cfg 
= GenerationConfig.from_pretrained(repo) if openai_version is None: gen_cfg.alignment_heads = None warnings.warn( "Alignment heads have not been included in the generation config, since they are available " "only for the original OpenAI checkpoints." "If you want to use word-level timestamps with a custom version of Whisper," "see https://github.com/openai/whisper/blob/main/notebooks/Multilingual_ASR.ipynb" "for the example of how to produce word-level timestamps manually." ) return gen_cfg def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(k, None) WHISPER_MAPPING = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def rename_keys(s_dict): keys = list(s_dict.keys()) for key in keys: new_key = key for k, v in WHISPER_MAPPING.items(): if k in key: new_key = new_key.replace(k, v) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) return s_dict def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def _download(url: str, root: str) -> Any: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() if insecure_hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return torch.load(io.BytesIO(model_bytes)) else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024 ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() if insecure_hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return torch.load(io.BytesIO(model_bytes)) def convert_openai_whisper_to_tfms( checkpoint_path, pytorch_dump_folder_path ) -> Tuple[WhisperForConditionalGeneration, bool, int]: if ".pt" not in checkpoint_path: root = os.path.dirname(pytorch_dump_folder_path) or "." 
original_checkpoint = _download(_MODELS[checkpoint_path], root) openai_version = checkpoint_path else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") openai_version = None dimensions = original_checkpoint["dims"] state_dict = original_checkpoint["model_state_dict"] proj_out_weights = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(state_dict) rename_keys(state_dict) tie_embeds = True ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0] # a hacky way to properly set up the bos/eos/pad token ids in the model endoftext_id = 50257 if dimensions["n_vocab"] > 51865 else 50256 config = WhisperConfig( vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], eos_token_id=endoftext_id, bos_token_id=endoftext_id, pad_token_id=endoftext_id, decoder_start_token_id=endoftext_id + 1, ) model = WhisperForConditionalGeneration(config) missing, unexpected = model.model.load_state_dict(state_dict, strict=False) if len(missing) > 0 and not set(missing) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f" but all the following weights are missing {missing}" ) if tie_embeds: model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens) else: model.proj_out.weight.data = proj_out_weights # determine those parameters from a model checkpoint as Whisper repo does is_multilingual = model.config.vocab_size >= 51865 num_languages = model.config.vocab_size - 51765 - int(is_multilingual) model.generation_config = _get_generation_config( is_multilingual, num_languages, openai_version, ) return model, is_multilingual, num_languages # Adapted from https://github.com/openai/tiktoken/issues/60#issuecomment-1499977960 def _bpe(mergeable_ranks, token: bytes, max_rank=None) -> list[bytes]: parts = [bytes([b]) for b in token] while True: min_idx = None min_rank = None for i, pair in enumerate(zip(parts[:-1], parts[1:])): rank = mergeable_ranks.get(pair[0] + pair[1]) if rank is not None and (min_rank is None or rank < min_rank): min_idx = i min_rank = rank if min_rank is None or (max_rank is not None and min_rank >= max_rank): break assert min_idx is not None parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2 :] return parts def convert_tiktoken_bpe_to_hf(tiktoken_url: str): bpe_ranks = load_tiktoken_bpe(tiktoken_url) byte_encoder = bytes_to_unicode() def token_bytes_to_string(b): return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) merges = [] vocab = {} for token, rank in bpe_ranks.items(): vocab[token_bytes_to_string(token)] = rank if len(token) == 1: continue merged = tuple(_bpe(bpe_ranks, token, max_rank=rank)) if len(merged) == 2: # account for empty token merges.append(" ".join(map(token_bytes_to_string, merged))) return vocab, merges def convert_tiktoken_to_hf( multilingual: bool = True, num_languages: int = 100, time_precision=0.02 ) -> WhisperTokenizer: # requires whisper, unless we use the path to the tiktoken file tiktoken_tokenizer_path = _TOKENIZERS["multilingual" if multilingual 
else "english"] start_of_transcript = ["<|endoftext|>", "<|startoftranscript|>"] control_tokens = [ "<|translate|>", "<|transcribe|>", "<|startoflm|>", "<|startofprev|>", "<|nospeech|>", "<|notimestamps|>", ] # these are special tokens, not normalized language_tokens = [f"<|{k}|>" for k in list(LANGUAGES)[:num_languages]] # These are not special but normalized timestamp_tokens = [("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)] vocab, merges = convert_tiktoken_bpe_to_hf(tiktoken_tokenizer_path) with tempfile.TemporaryDirectory() as tmpdirname: vocab_file = f"{tmpdirname}/vocab.json" merge_file = f"{tmpdirname}/merges.txt" with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens in merges: writer.write(bpe_tokens + "\n") hf_tokenizer = WhisperTokenizer(vocab_file, merge_file) hf_tokenizer.add_tokens(start_of_transcript + language_tokens + control_tokens, special_tokens=True) hf_tokenizer.add_tokens(timestamp_tokens, special_tokens=False) return hf_tokenizer if __name__ == "__main__": parser = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--convert_preprocessor", type=bool, default=False, help="Whether or not the preprocessor (tokenizer + feature extractor) should be converted along with the model.", ) args = parser.parse_args() model, is_multilingual, num_languages = convert_openai_whisper_to_tfms( args.checkpoint_path, args.pytorch_dump_folder_path ) if args.convert_preprocessor: try: if not _is_package_available("tiktoken"): raise """`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer""" except Exception: pass else: from tiktoken.load import load_tiktoken_bpe tokenizer = convert_tiktoken_to_hf(is_multilingual, num_languages) feature_extractor = WhisperFeatureExtractor( feature_size=model.config.num_mel_bins, # the rest of default parameters are the same as hardcoded in openai/whisper ) processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(args.pytorch_dump_folder_path) # save fast tokenizer as well fast_tokenizer = WhisperTokenizerFast.from_pretrained(args.pytorch_dump_folder_path) fast_tokenizer.save_pretrained(args.pytorch_dump_folder_path, legacy_format=False) model.save_pretrained(args.pytorch_dump_folder_path)
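The script above is normally invoked from the command line, e.g. `python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf --convert_preprocessor True`. The sketch below drives the core conversion function directly from Python instead; it assumes the script is importable from an installed or editable `transformers` source tree, and the output directory is an arbitrary example.

```python
from transformers.models.whisper.convert_openai_to_hf import convert_openai_whisper_to_tfms

# Passing a key of `_MODELS` (here "tiny") instead of a local `.pt` path makes
# `_download` fetch and checksum the original OpenAI checkpoint first.
model, is_multilingual, num_languages = convert_openai_whisper_to_tfms(
    checkpoint_path="tiny", pytorch_dump_folder_path="./whisper-tiny-hf"
)
model.save_pretrained("./whisper-tiny-hf")
print(is_multilingual, num_languages)  # "tiny" is multilingual with 99 languages
```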
transformers/src/transformers/models/whisper/convert_openai_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/whisper/convert_openai_to_hf.py", "repo_id": "transformers", "token_count": 6393 }
375
# coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XGLM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`XGLMModel`]. It is used to instantiate an XGLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the XGLM
    [facebook/xglm-564M](https://huggingface.co/facebook/xglm-564M) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256008):
            Vocabulary size of the XGLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`XGLMModel`] or [`FlaxXGLMModel`].
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        d_model (`int`, *optional*, defaults to 1024):
            Dimension of the layers and the pooler layer.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        num_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer decoder.
        attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, decoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Example: ```python >>> from transformers import XGLMModel, XGLMConfig >>> # Initializing a XGLM facebook/xglm-564M style configuration >>> configuration = XGLMConfig() >>> # Initializing a model from the facebook/xglm-564M style configuration >>> model = XGLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xglm" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "d_model", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.ffn_dim = ffn_dim self.num_layers = num_layers self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.init_std = init_std self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
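Beyond the docstring example above, the `attribute_map` declared on the class lets the generic Transformers attribute names resolve to XGLM's native parameter names. A small sketch:

```python
from transformers import XGLMConfig

config = XGLMConfig(d_model=512, attention_heads=8, num_layers=12)

# The aliases in `attribute_map` read the underlying XGLM-specific attributes.
assert config.hidden_size == config.d_model == 512
assert config.num_attention_heads == config.attention_heads == 8
assert config.num_hidden_layers == config.num_layers == 12
```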
transformers/src/transformers/models/xglm/configuration_xglm.py/0
{ "file_path": "transformers/src/transformers/models/xglm/configuration_xglm.py", "repo_id": "transformers", "token_count": 2346 }
376
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } PRETRAINED_INIT_CONFIGURATION = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab class XLMProphetNetTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"[SEP]"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. 
mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10): tok = f"[unused{i}]" self.fairseq_tokens_to_ids[tok] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab self.fairseq_offset = 12 self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} # TODO ArthurZ fairseq_ids_to_tokens should be removed super().__init__( bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> str: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A XLMProphetNet sequence has the following format: - single sequence: `X [SEP]` - pair of sequences: `A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep
transformers/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py/0
{ "file_path": "transformers/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py", "repo_id": "transformers", "token_count": 5985 }
377
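Before moving on, here is an illustrative sketch of the special-token layout produced by `build_inputs_with_special_tokens` in the tokenizer file above. It assumes the `sentencepiece` package is installed and that the checkpoint referenced in `PRETRAINED_VOCAB_FILES_MAP` is reachable.

```python
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

single = tokenizer("Hello world")["input_ids"]
pair = tokenizer("Hello world", "How are you?")["input_ids"]

# Single sequence:   X [SEP]
# Pair of sequences: A [SEP] B [SEP]
assert single[-1] == tokenizer.sep_token_id
assert pair.count(tokenizer.sep_token_id) == 2
```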
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLNet model. """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlnet/xlnet-base-cased", "xlnet/xlnet-large-cased", # See all XLNet models at https://huggingface.co/models?filter=xlnet ] def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. 
""" tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights ): tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = f"model/transformer/layer_{i}/" tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update( { "model/transformer/r_r_bias": r_r_list, "model/transformer/r_w_bias": r_w_list, "model/transformer/r_s_bias": r_s_list, "model/transformer/seg_embed": seg_embed_list, } ) return tf_to_pt_map def load_tf_weights_in_xlnet(model, config, tf_path): """Load tf checkpoints in a pytorch model""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name): logger.info("Transposing") array = np.transpose(array) if isinstance(pointer, list): # Here we will split the TF weights assert ( len(pointer) == array.shape[0] ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched" for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert ( p_i.shape == arr_i.shape ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched" except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info(f"Initialize PyTorch weight {name} for layer {i}") p_i.data = torch.from_numpy(arr_i) else: try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/Adam", None) tf_weights.pop(name + "/Adam_1", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model class XLNetRelativeAttention(nn.Module): def __init__(self, config): super().__init__() if config.d_model % config.n_head != 0: raise ValueError( f"The hidden size ({config.d_model}) is not a multiple of the number of attention " f"heads ({config.n_head}" ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head**0.5) self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head)) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.dropout) def prune_heads(self, heads): raise NotImplementedError @staticmethod def rel_shift(x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = x.shape x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3]) x = x[1:, ...] 
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3]) # x = x[:, 0:klen, :, :] x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x @staticmethod def rel_shift_bnij(x, klen=-1): x_size = x.shape x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2]) x = x[:, :, 1:, :] x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1) # Note: the tensor-slice form was faster in my testing than torch.index_select # However, tracing doesn't like the nature of the slice, and if klen changes # during the run then it'll fail, whereas index_select will be fine. x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long)) # x = x[:, :, :, :klen] return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False, ): """Core relative positional attention operations.""" # content based attention score ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h) # position based attention score bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift_bnij(bd, klen=ac.shape[3]) # segment based attention score if seg_mat is None: ef = 0 else: ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == torch.float16: attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask) else: attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask) # attention probability attn_prob = nn.functional.softmax(attn_score, dim=3) attn_prob = self.dropout(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask) # attention output attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, torch.einsum("bnij->ijbn", attn_prob) return attn_vec def post_attention(self, h, attn_vec, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def forward( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
# content based attention score if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content-based key head k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h) # g-stream # query-stream query head q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content heads q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads # type casting for fp16 support k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super().__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, output_h, output_g, 
attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): outputs = self.rel_attn( output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g ) output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs def ff_chunk(self, output_x): output_x = self.ff(output_x) return output_x class XLNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [ module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed, ]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) @dataclass class XLNetModelOutput(ModelOutput): """ Output type of [`XLNetModel`]. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetLMHeadModelOutput(ModelOutput): """ Output type of [`XLNetLMHeadModel`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForSequenceClassificationOutput(ModelOutput): """ Output type of [`XLNetForSequenceClassification`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForTokenClassificationOutput(ModelOutput): """ Output type of [`XLNetForTokenClassificationOutput`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForMultipleChoiceOutput(ModelOutput): """ Output type of [`XLNetForMultipleChoice`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnsweringSimple`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XLNetForQuestionAnsweringOutput(ModelOutput): """ Output type of [`XLNetForQuestionAnswering`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). 
end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None XLNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`XLNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) mems (`List[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . 
Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) input_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
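        use_mems (`bool`, *optional*):
            Whether or not to use the recurrent memory mechanism and return updated `mems` (see the `mems` argument
            above). If not set, this defaults to `config.use_mems_train` when the model is in training mode and to
            `config.use_mems_eval` otherwise.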
""" @add_start_docstrings( "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class XLNetModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ mask = torch.ones((qlen, qlen + mlen), device=self.device) if self.same_length: mask_lo = mask[:, :qlen].tril(-1) mask.triu_(mlen + 1) mask[:, :qlen] += mask_lo else: mask.triu_(mlen + 1) return mask def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float() inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete after depreciation warning is removed ) -> Union[Tuple, XLNetModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`" " instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either 
input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = pos_emb.to(output_h.device) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each 
layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i], output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( """ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLNET_START_DOCSTRING, ) class XLNetLMHeadModel(XLNetPreTrainedModel): _tied_weights_keys = ["lm_loss.weight"] def __init__(self, config): super().__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_loss def set_output_embeddings(self, new_embeddings): self.lm_loss = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs): # Add dummy token at the end (no attention on this one) effective_batch_size = input_ids.shape[0] dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. 
A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. offset = 2 if past_key_values: input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1) else: input_ids = torch.cat([input_ids, dummy_token], dim=1) # Build permutation mask so that previous tokens don't see last token sequence_length = input_ids.shape[1] perm_mask = torch.zeros( (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device ) perm_mask[:, :, -1] = 1.0 # We'll only predict the last token target_mapping = torch.zeros( (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device ) target_mapping[:, 0, -1] = 1.0 inputs = { "input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": use_mems, } # if past is defined in model kwargs then use it for faster decoding if past_key_values: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values) return inputs @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetLMHeadModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*): Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below) Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss is only computed for labels in `[0, ..., config.vocab_size]` Return: Examples: ```python >>> from transformers import AutoTokenizer, XLNetLMHeadModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased") >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased") >>> # We show how to setup inputs to predict a next token using a bi-directional context. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... 
] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) >>> next_token_logits = outputs[ ... 0 ... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] >>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling. >>> input_ids = torch.tensor( ... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False) ... ).unsqueeze( ... 0 ... ) # We will predict the masked token >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0) >>> assert labels.shape[0] == 1, "only one word will be predicted" >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[ ... :, :, -1 ... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training >>> target_mapping = torch.zeros( ... (1, 1, input_ids.shape[1]), dtype=torch.float ... ) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[ ... 0, 0, -1 ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) >>> loss = outputs.loss >>> next_token_logits = ( ... outputs.logits ... ) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) logits = self.lm_loss(transformer_outputs[0]) loss = None if labels is not None: # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings( """ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", XLNET_START_DOCSTRING, ) class XLNetForSequenceClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForSequenceClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a token classification head on top (a linear layer on top of the 
hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=XLNetForTokenClassificationOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        mems: Optional[torch.Tensor] = None,
        perm_mask: Optional[torch.Tensor] = None,
        target_mapping: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        input_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForTokenClassificationOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForTokenClassificationOutput(
            loss=loss,
            logits=logits,
            mems=outputs.mems,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RACE/SWAG tasks.
""", XLNET_START_DOCSTRING, ) class XLNetForMultipleChoice(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForMultipleChoiceOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) transformer_outputs = self.transformer( flat_input_ids, token_type_ids=flat_token_type_ids, input_mask=flat_input_mask, attention_mask=flat_attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels.view(-1)) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return XLNetForQuestionAnsweringSimpleOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnswering(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.transformer = XLNetModel(config) self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, is_impossible: Optional[torch.Tensor] = None, cls_index: Optional[torch.Tensor] = None, p_mask: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels whether a question has an answer or no answer (SQuAD 2.0) cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the classification token to use as input for computing plausibility of the answer. p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be masked. 0.0 mean token is not masked. Returns: Example: ```python >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased") >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze( ... 0 ... 
) # Batch size 1 >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 if not return_dict: return (total_loss,) + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( loss=total_loss, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum( "blh,bl->bh", hidden_states, start_log_probs ) # get the 
representation of START as weighted sum of hidden states cls_logits = self.answer_class( hidden_states, start_states=start_states, cls_index=cls_index ) # Shape (batch size,): one single `cls_logits` for each sample if not return_dict: outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) return outputs + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
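The inference branch of `XLNetForQuestionAnswering.forward` above returns beam-search style tensors (`start_top_*`, `end_top_*`, `cls_logits`) rather than full per-token logits. The sketch below shows one way those outputs could be decoded into an answer span; the tensor shapes follow the code above (the `end_top_*` tensors are un-flattened back to `(batch, end_n_top, start_n_top)`), but the span-selection heuristic and the example inputs are illustrative assumptions, and the base `xlnet/xlnet-base-cased` checkpoint's QA heads are not fine-tuned, so the decoded text is only a smoke test.

```python
# Illustrative sketch: turning the beam-search outputs of XLNetForQuestionAnswering
# (start_top_*, end_top_*) into a single answer span. Shapes mirror the forward
# pass above: start_top_* is (batch, start_n_top) and end_top_* is flattened from
# (batch, end_n_top, start_n_top).
import torch
from transformers import AutoTokenizer, XLNetForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")

question, context = "Who wrote the book?", "The book was written by Jane Doe in 1999."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # no start/end positions -> inference (beam search) branch

start_n_top, end_n_top = model.config.start_n_top, model.config.end_n_top
start_index = outputs.start_top_index[0]          # (start_n_top,)
start_log_probs = outputs.start_top_log_probs[0]  # (start_n_top,)
end_index = outputs.end_top_index.view(-1, end_n_top, start_n_top)[0]          # (end_n_top, start_n_top)
end_log_probs = outputs.end_top_log_probs.view(-1, end_n_top, start_n_top)[0]  # (end_n_top, start_n_top)

best = None
for i in range(start_n_top):      # rank of the start candidate
    for j in range(end_n_top):    # rank of the end candidate for that start
        start, end = start_index[i].item(), end_index[j, i].item()
        if start > end:           # drop ill-formed spans (heuristic)
            continue
        score = start_log_probs[i].item() + end_log_probs[j, i].item()
        if best is None or score > best[0]:
            best = (score, start, end)

if best is not None:
    _, start, end = best
    print(tokenizer.decode(inputs["input_ids"][0][start : end + 1]))
```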
transformers/src/transformers/models/xlnet/modeling_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/modeling_xlnet.py", "repo_id": "transformers", "token_count": 41808 }
378
# coding=utf-8 # Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch YOSO model.""" import math from pathlib import Path from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_yoso import YosoConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "uw-madison/yoso-4096" _CONFIG_FOR_DOC = "YosoConfig" YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = [ "uw-madison/yoso-4096", # See all YOSO models at https://huggingface.co/models?filter=yoso ] lsh_cumulation = None def load_cuda_kernels(): global lsh_cumulation from torch.utils.cpp_extension import load def append_root(files): src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso" return [src_folder / file for file in files] src_files = append_root(["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"]) load("fast_lsh_cumulation", src_files, verbose=True) import fast_lsh_cumulation as lsh_cumulation def to_contiguous(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: if not tensor.is_contiguous(): tensor = tensor.contiguous() out.append(tensor) return out else: if not input_tensors.is_contiguous(): input_tensors = input_tensors.contiguous() return input_tensors def normalize(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: out.append(nn.functional.normalize(tensor, p=2, dim=-1)) return out else: return nn.functional.normalize(input_tensors, p=2, dim=-1) def hashing(query, key, num_hash, hash_len): if len(query.size()) != 3: raise ValueError("Query has incorrect size.") if len(key.size()) != 3: raise ValueError("Key has incorrect size.") rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device) raise_pow = 2 ** torch.arange(hash_len, device=query.device) query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len) key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len) query_binary = (query_projection > 0).int() key_binary = (key_projection > 0).int() query_hash = torch.sum(query_binary * raise_pow, dim=-1) key_hash = torch.sum(key_binary * raise_pow, dim=-1) return query_hash.int(), key_hash.int() class
YosoCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): hash_code_len = config["hash_code_len"] expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] cumulation_value = torch.matmul(expectation, value) ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors config = ctx.config hash_code_len = config["hash_code_len"] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None class YosoLSHCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): if query_mask.size(0) != key_mask.size(0): raise ValueError("Query mask and Key mask differ in sizes in dimension 0") if query_mask.size(0) != query.size(0): raise ValueError("Query mask and Query differ in sizes in dimension 0") if query_mask.size(0) != key.size(0): raise ValueError("Query mask and Key differ in sizes in dimension 0") if query_mask.size(0) != value.size(0): raise ValueError("Query mask and Value mask differ in sizes in dimension 0") if key.size(1) != value.size(1): raise ValueError("Key and Value differ in sizes in dimension 1") if query.size(2) != key.size(2): raise ValueError("Query and Key differ in sizes in dimension 2") query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value]) use_cuda = query_mask.is_cuda num_hash = config["num_hash"] hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["use_fast_hash"]: query_hash_code, key_hash_code = lsh_cumulation.fast_hash( query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1 ) else: query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len) cumulation_value = lsh_cumulation.lsh_cumulation( query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1 ) ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors config = ctx.config use_cuda = grad.is_cuda hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["lsh_backward"]: grad_value = lsh_cumulation.lsh_cumulation( key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1 ) grad_query = lsh_cumulation.lsh_weighted_cumulation( query_mask, query_hash_code, grad, key_mask, key_hash_code, value, (hash_code_len / 2) * key, hashtable_capacity, use_cuda, 4, ) grad_key = lsh_cumulation.lsh_weighted_cumulation( key_mask, key_hash_code, value, query_mask, query_hash_code, grad, (hash_code_len / 2) * query, hashtable_capacity, use_cuda, 4, ) else: expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation 
* query_mask[:, :, None] * key_mask[:, None, :] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None # Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings class YosoEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class YosoSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) kernel_loaded = lsh_cumulation is not None if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: try: load_cuda_kernels() except Exception as e: logger.warning(f"Could not load 
the custom kernel for multi-scale deformable attention: {e}") self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = ( position_embedding_type if position_embedding_type is not None else config.position_embedding_type ) self.use_expectation = config.use_expectation self.hash_code_len = config.hash_code_len self.use_conv = config.conv_window is not None self.use_fast_hash = config.use_fast_hash self.num_hash = config.num_hash self.lsh_backward = config.lsh_backward self.lsh_config = { "hash_code_len": self.hash_code_len, "use_fast_hash": self.use_fast_hash, "num_hash": self.num_hash, "lsh_backward": self.lsh_backward, } if config.conv_window is not None: self.conv = nn.Conv2d( in_channels=config.num_attention_heads, out_channels=config.num_attention_heads, kernel_size=(config.conv_window, 1), padding=(config.conv_window // 2, 0), bias=False, groups=config.num_attention_heads, ) def transpose_for_scores(self, layer): new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) layer = layer.view(*new_layer_shape) return layer.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.use_conv: conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None]) batch_size, num_heads, seq_len, head_dim = query_layer.size() query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim) key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim) value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim) attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.unsqueeze(1) .repeat_interleave(num_heads, dim=1) .reshape(batch_size * num_heads, seq_len) .int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros. 
gpu_warp_size = 32 if (not self.use_expectation) and head_dim < gpu_warp_size: pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim query_layer = torch.cat( [ query_layer, torch.zeros(pad_size, device=query_layer.device), ], dim=-1, ) key_layer = torch.cat( [ key_layer, torch.zeros(pad_size, device=key_layer.device), ], dim=-1, ) value_layer = torch.cat( [ value_layer, torch.zeros(pad_size, device=value_layer.device), ], dim=-1, ) if self.use_expectation or self.training: query_layer, key_layer = normalize([query_layer, key_layer]) if self.use_expectation: context_layer = YosoCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) else: context_layer = YosoLSHCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) if (not self.use_expectation) and head_dim < gpu_warp_size: context_layer = context_layer[:, :, :head_dim] context_layer = normalize(context_layer) context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) if self.use_conv: context_layer += conv_value_layer context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, context_layer) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class YosoSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type) self.output = YosoSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class YosoIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if 
isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class YosoOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YosoAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = YosoIntermediate(config) self.output = YosoOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class YosoEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class YosoPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, 
str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso class YosoLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = YosoPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso class YosoOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = YosoLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class YosoPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = YosoConfig base_model_prefix = "yoso" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) YOSO_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`YosoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ YOSO_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare YOSO Model transformer outputting raw hidden-states without any specific head on top.", YOSO_START_DOCSTRING, ) class YosoModel(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = YosoEmbeddings(config) self.encoder = YosoEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""YOSO Model with a `language modeling` head on top.""", YOSO_START_DOCSTRING) class YosoForMaskedLM(YosoPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): 
super().__init__(config) self.yoso = YosoModel(config) self.cls = YosoOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class YosoClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks.""", YOSO_START_DOCSTRING, ) class YosoForSequenceClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.classifier = YosoClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.""", YOSO_START_DOCSTRING, ) class YosoForMultipleChoice(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.""", YOSO_START_DOCSTRING, ) class YosoForTokenClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """YOSO Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", YOSO_START_DOCSTRING, ) class YosoForQuestionAnswering(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.yoso = YosoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, 
head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
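With the task heads above in place, a minimal masked-language-modeling call against the documentation checkpoint referenced in this file (`uw-madison/yoso-4096`) might look like the sketch below. The sentence and the top-1 decoding are illustrative choices, not part of the model code.

```python
# Minimal sketch: masked-token prediction with YosoForMaskedLM.
import torch
from transformers import AutoTokenizer, YosoForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")
model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")

# Use tokenizer.mask_token so the example does not depend on the checkpoint's
# exact mask string.
text = f"Paris is the {tokenizer.mask_token} of France."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (batch, seq_len, vocab_size)

# Highest-scoring token at the masked position.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```

The same call pattern applies to the sequence-classification, multiple-choice, token-classification and question-answering heads defined above; only the head-specific labels and outputs change.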
transformers/src/transformers/models/yoso/modeling_yoso.py/0
{ "file_path": "transformers/src/transformers/models/yoso/modeling_yoso.py", "repo_id": "transformers", "token_count": 23776 }
379
# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import List, Optional, Tuple, Union import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging, ) from .base import ChunkPipeline, build_pipeline_init_args from .question_answering import select_starts_ends if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES TESSERACT_LOADED = False if is_pytesseract_available(): TESSERACT_LOADED = True import pytesseract logger = logging.get_logger(__name__) # normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. # However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an # unnecessary dependency. def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) if len(words) != len(normalized_boxes): raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes class ModelType(ExplicitEnum): LayoutLM = "layoutlm" LayoutLMv2andv3 = "layoutlmv2andv3" VisionEncoderDecoder = "vision_encoder_decoder" @add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True)) class DocumentQuestionAnsweringPipeline(ChunkPipeline): # TODO: Update 
task_summary docs to include an example with document QA and then update the first sentence """ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context. Example: ```python >>> from transformers import pipeline >>> document_qa = pipeline(model="impira/layoutlm-document-qa") >>> document_qa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a document question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"): raise ValueError( "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer " f"(`{self.tokenizer.__class__.__name__}`) is provided." ) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": self.model_type = ModelType.VisionEncoderDecoder if self.model.config.encoder.model_type != "donut-swin": raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") else: self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) if self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, padding=None, doc_stride=None, max_question_len=None, lang: Optional[str] = None, tesseract_config: Optional[str] = None, max_answer_len=None, max_seq_len=None, top_k=None, handle_impossible_answer=None, timeout=None, **kwargs, ): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len if lang is not None: preprocess_params["lang"] = lang if tesseract_config is not None: preprocess_params["tesseract_config"] = tesseract_config if timeout is not None: preprocess_params["timeout"] = timeout if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer return preprocess_params, {}, postprocess_params def __call__( self, image: Union["Image.Image", str], question: Optional[str] = None, word_boxes: Tuple[str, List[float]] = None, **kwargs, ): """ Answer the question(s) given 
as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. 
- **words** (`list[int]`) -- The index of each word/box pair that is in the answer """ if isinstance(question, str): inputs = {"question": question, "image": image} if word_boxes is not None: inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs) def preprocess( self, input, padding="do_not_pad", doc_stride=None, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, lang=None, tesseract_config="", timeout=None, ): # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR # to support documents with enough tokens that overflow the model's window if max_seq_len is None: max_seq_len = self.tokenizer.model_max_length if doc_stride is None: doc_stride = min(max_seq_len // 2, 256) image = None image_features = {} if input.get("image", None) is not None: image = load_image(input["image"], timeout=timeout) if self.image_processor is not None: image_features.update(self.image_processor(images=image, return_tensors=self.framework)) elif self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) elif self.model_type == ModelType.VisionEncoderDecoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None if not self.model_type == ModelType.VisionEncoderDecoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] elif "words" in image_features and "boxes" in image_features: words = image_features.pop("words")[0] boxes = image_features.pop("boxes")[0] elif image is not None: if not TESSERACT_LOADED: raise ValueError( "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," " but pytesseract is not available" ) if TESSERACT_LOADED: words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) else: raise ValueError( "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" " run OCR to derive words and boxes" ) if self.tokenizer.padding_side != "right": raise ValueError( "Document question answering only supports tokenizers whose padding side is 'right', not" f" {self.tokenizer.padding_side}" ) if self.model_type == ModelType.VisionEncoderDecoder: task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { "inputs": image_features["pixel_values"], "decoder_input_ids": self.tokenizer( task_prompt, add_special_tokens=False, return_tensors=self.framework ).input_ids, "return_dict_in_generate": True, } yield { **encoding, "p_mask": None, "word_ids": None, "words": None, "output_attentions": True, "is_last": True, } else: tokenizer_kwargs = {} if self.model_type == ModelType.LayoutLM: tokenizer_kwargs["text"] = input["question"].split() tokenizer_kwargs["text_pair"] = words tokenizer_kwargs["is_split_into_words"] = True else: tokenizer_kwargs["text"] = [input["question"]] tokenizer_kwargs["text_pair"] = [words] tokenizer_kwargs["boxes"] = [boxes] encoding = self.tokenizer( padding=padding, max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, truncation="only_second", return_overflowing_tokens=True, **tokenizer_kwargs, ) # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs # FIXME: ydshieh and/or Narsil encoding.pop("overflow_to_sample_mapping", None) # We do not use this num_spans = len(encoding["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) # This logic mirrors the logic in the question_answering pipeline p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] for span_idx in range(num_spans): if self.framework == "pt": span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()} if "pixel_values" in image_features: span_encoding["image"] = image_features["pixel_values"] else: raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") input_ids_span_idx = encoding["input_ids"][span_idx] # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document. 
if "boxes" not in tokenizer_kwargs: bbox = [] for input_id, sequence_id, word_id in zip( encoding.input_ids[span_idx], encoding.sequence_ids(span_idx), encoding.word_ids(span_idx), ): if sequence_id == 1: bbox.append(boxes[word_id]) elif input_id == self.tokenizer.sep_token_id: bbox.append([1000] * 4) else: bbox.append([0] * 4) if self.framework == "pt": span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0) elif self.framework == "tf": raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") yield { **span_encoding, "p_mask": p_mask[span_idx], "word_ids": encoding.word_ids(span_idx), "words": words, "is_last": span_idx == num_spans - 1, } def _forward(self, model_inputs, **generate_kwargs): p_mask = model_inputs.pop("p_mask", None) word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) is_last = model_inputs.pop("is_last", False) if self.model_type == ModelType.VisionEncoderDecoder: model_outputs = self.model.generate(**model_inputs, **generate_kwargs) else: model_outputs = self.model(**model_inputs) model_outputs = dict(model_outputs.items()) model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) model_outputs["is_last"] = is_last return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): if self.model_type == ModelType.VisionEncoderDecoder: answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs] else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] return answers def postprocess_encoder_decoder_single(self, model_outputs, **kwargs): sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0] # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { "answer": None, } answer = re.search(r"<s_answer>(.*)</s_answer>", sequence) if answer is not None: ret["answer"] = answer.group(1).strip() return ret def postprocess_extractive_qa( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: words = output["words"] starts, ends, scores, min_null_score = select_starts_ends( start=output["start_logits"], end=output["end_logits"], p_mask=output["p_mask"], attention_mask=output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None, min_null_score=min_null_score, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) word_ids = output["word_ids"] for start, end, score in zip(starts, ends, scores): word_start, word_end = word_ids[start], word_ids[end] if word_start is not None and word_end is not None: answers.append( { "score": float(score), "answer": " ".join(words[word_start : word_end + 1]), "start": word_start, "end": word_end, } ) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) return answers
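

# Illustrative usage sketch (not part of the upstream module): a minimal, hedged example of
# how this pipeline is typically invoked. The checkpoint id and the image path below are
# assumptions chosen for illustration only.
if __name__ == "__main__":
    from transformers import pipeline

    doc_qa = pipeline(
        "document-question-answering",
        model="impira/layoutlm-document-qa",  # assumed example checkpoint
    )
    # Without `word_boxes`, OCR is run via Tesseract, so `pytesseract` must be installed.
    answers = doc_qa(image="invoice.png", question="What is the invoice number?")
    print(answers)  # e.g. [{"score": ..., "answer": ..., "start": ..., "end": ...}]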
transformers/src/transformers/pipelines/document_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/document_question_answering.py", "repo_id": "transformers", "token_count": 10346 }
380
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Union

from ..utils import is_torch_available
from .base import Pipeline


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
    from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan

DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan"


class TextToAudioPipeline(Pipeline):
    """
    Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
    pipeline generates an audio file from an input text and optional other conditional inputs.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> pipe = pipeline(model="suno/bark-small")
    >>> output = pipe("Hey it's HuggingFace on the phone!")

    >>> audio = output["audio"]
    >>> sampling_rate = output["sampling_rate"]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    <Tip>

    You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
    [`TextToAudioPipeline.__call__.generate_kwargs`].

    Example:

    ```python
    >>> from transformers import pipeline

    >>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")

    >>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length
    >>> generate_kwargs = {
    ...     "do_sample": True,
    ...     "temperature": 0.7,
    ...     "max_new_tokens": 35,
    ... }

    >>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
    ```

    </Tip>

    This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
    `"text-to-audio"`.

    See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
""" def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError("The TextToAudioPipeline is only available in PyTorch.") self.vocoder = None if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values(): self.vocoder = ( SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device) if vocoder is None else vocoder ) self.sampling_rate = sampling_rate if self.vocoder is not None: self.sampling_rate = self.vocoder.config.sampling_rate if self.sampling_rate is None: # get sampling_rate from config and generation config config = self.model.config gen_config = self.model.__dict__.get("generation_config", None) if gen_config is not None: config.update(gen_config.to_dict()) for sampling_rate_name in ["sample_rate", "sampling_rate"]: sampling_rate = getattr(config, sampling_rate_name, None) if sampling_rate is not None: self.sampling_rate = sampling_rate def preprocess(self, text, **kwargs): if isinstance(text, str): text = [text] if self.model.config.model_type == "bark": # bark Tokenizer is called with BarkProcessor which uses those kwargs new_kwargs = { "max_length": self.model.generation_config.semantic_config.get("max_input_semantic_length", 256), "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } # priority is given to kwargs new_kwargs.update(kwargs) kwargs = new_kwargs output = self.tokenizer(text, **kwargs, return_tensors="pt") return output def _forward(self, model_inputs, **kwargs): # we expect some kwargs to be additional tensors which need to be on the right device kwargs = self._ensure_tensor_on_device(kwargs, device=self.device) forward_params = kwargs["forward_params"] generate_kwargs = kwargs["generate_kwargs"] if self.model.can_generate(): # we expect some kwargs to be additional tensors which need to be on the right device generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device) # generate_kwargs get priority over forward_params forward_params.update(generate_kwargs) output = self.model.generate(**model_inputs, **forward_params) else: if len(generate_kwargs): raise ValueError( f"""You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non empty. For forward-only TTA models, please use `forward_params` instead of of `generate_kwargs`. For reference, here are the `generate_kwargs` used here: {generate_kwargs.keys()}""" ) output = self.model(**model_inputs, **forward_params)[0] if self.vocoder is not None: # in that case, the output is a spectrogram that needs to be converted into a waveform output = self.vocoder(output) return output def __call__(self, text_inputs: Union[str, List[str]], **forward_params): """ Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information. Args: text_inputs (`str` or `List[str]`): The text(s) to generate. forward_params (`dict`, *optional*): Parameters passed to the model generation/forward method. `forward_params` are always passed to the underlying model. generate_kwargs (`dict`, *optional*): The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a complete overview of generate, check the [following guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are only passed to the underlying model if the latter is a generative model. 
Return: A `dict` or a list of `dict`: The dictionaries have two keys: - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform. - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform. """ return super().__call__(text_inputs, **forward_params) def _sanitize_parameters( self, preprocess_params=None, forward_params=None, generate_kwargs=None, ): params = { "forward_params": forward_params if forward_params else {}, "generate_kwargs": generate_kwargs if generate_kwargs else {}, } if preprocess_params is None: preprocess_params = {} postprocess_params = {} return preprocess_params, params, postprocess_params def postprocess(self, waveform): output_dict = {} output_dict["audio"] = waveform.cpu().float().numpy() output_dict["sampling_rate"] = self.sampling_rate return output_dict
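

# Illustrative usage sketch (not part of the upstream module): generate speech with the
# pipeline above and write it to disk. The checkpoint id and the output filename are
# assumptions; `scipy` is only needed here for saving the waveform.
if __name__ == "__main__":
    import scipy.io.wavfile

    from transformers import pipeline

    tts = pipeline("text-to-speech", model="suno/bark-small")
    output = tts("Hello, this is a test of the text-to-audio pipeline.")
    # `output["audio"]` is an np.ndarray and `output["sampling_rate"]` an int, as documented above.
    scipy.io.wavfile.write("speech.wav", rate=output["sampling_rate"], data=output["audio"].squeeze())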
transformers/src/transformers/pipelines/text_to_audio.py/0
{ "file_path": "transformers/src/transformers/pipelines/text_to_audio.py", "repo_id": "transformers", "token_count": 3322 }
381
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from packaging import version from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging from .quantizers_utils import get_module_from_name if is_torch_available(): import torch from ..pytorch_utils import Conv1D logger = logging.get_logger(__name__) class Bnb8BitHfQuantizer(HfQuantizer): """ 8-bit quantization from bitsandbytes quantization method: before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call saving: from state dict, as usual; saves weights and 'SCB' component loading: need to locate SCB component and pass to the Linear8bitLt object """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not (is_accelerate_available() and is_bitsandbytes_available()): raise ImportError( "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` " "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`" ) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): raise RuntimeError("No GPU found. A GPU is needed for quantization.") device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to `from_pretrained`. Check https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu for more details. 
""" ) if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 8bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype def update_device_map(self, device_map): if device_map is None: device_map = {"": torch.cuda.current_device()} logger.info( "The device_map was not initialized. " "Setting device_map to {'':torch.cuda.current_device()}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization") return torch.int8 def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params): if self.pre_quantized: if param_name.replace("weight", "SCB") not in state_dict.keys(): raise ValueError("Missing quantization component `SCB`") if param_value.dtype != torch.int8: raise ValueError( f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`." 
) return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() needs aux items from state dicts, if found - removes them from unexpected_keys """ import bitsandbytes as bnb fp16_statistics_key = param_name.replace("weight", "SCB") fp16_statistics = state_dict.get(fp16_statistics_key, None) module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") new_value = param_value.to("cpu") if self.pre_quantized and not self.is_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D): if fp16_statistics is None: new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: setattr(module.weight, "SCB", fp16_statistics.to(target_device)) if unexpected_keys is not None: unexpected_keys.remove(fp16_statistics_key) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_loaded_in_8bit = True model.is_8bit_serializable = self.is_serializable return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.llm_int8_skip_modules is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " 
"`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here model.config.quantization_config = self.quantization_config @property def is_serializable(self): _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( "0.37.2" ) if not _bnb_supports_8bit_serialization: logger.warning( "You are calling `save_pretrained` to a 8-bit converted model, but your `bitsandbytes` version doesn't support it. " "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or" " unexpected behaviours." ) return False return True @property def is_trainable(self) -> bool: return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")
transformers/src/transformers/quantizers/quantizer_bnb_8bit.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_bnb_8bit.py", "repo_id": "transformers", "token_count": 5284 }
382
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.util import json import os import time from dataclasses import dataclass from typing import Dict import requests from huggingface_hub import HfFolder, hf_hub_download, list_spaces from ..models.auto import AutoTokenizer from ..utils import is_offline_mode, is_openai_available, is_torch_available, logging from .base import TASK_MAPPING, TOOL_CONFIG_FILE, Tool, load_tool, supports_remote from .prompts import CHAT_MESSAGE_PROMPT, download_prompt from .python_interpreter import evaluate logger = logging.get_logger(__name__) if is_openai_available(): import openai if is_torch_available(): from ..generation import StoppingCriteria, StoppingCriteriaList from ..models.auto import AutoModelForCausalLM else: StoppingCriteria = object _tools_are_initialized = False BASE_PYTHON_TOOLS = { "print": print, "range": range, "float": float, "int": int, "bool": bool, "str": str, } @dataclass class PreTool: task: str description: str repo_id: str HUGGINGFACE_DEFAULT_TOOLS = {} HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ "image-transformation", "text-download", "text-to-image", "text-to-video", ] def get_remote_tools(organization="huggingface-tools"): if is_offline_mode(): logger.info("You are in offline mode, so remote tools are not available.") return {} spaces = list_spaces(author=organization) tools = {} for space_info in spaces: repo_id = space_info.id resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space") with open(resolved_config_file, encoding="utf-8") as reader: config = json.load(reader) task = repo_id.split("/")[-1] tools[config["name"]] = PreTool(task=task, description=config["description"], repo_id=repo_id) return tools def _setup_default_tools(): global HUGGINGFACE_DEFAULT_TOOLS global _tools_are_initialized if _tools_are_initialized: return main_module = importlib.import_module("transformers") tools_module = main_module.tools remote_tools = get_remote_tools() for task_name, tool_class_name in TASK_MAPPING.items(): tool_class = getattr(tools_module, tool_class_name) description = tool_class.description HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) if not is_offline_mode(): for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: found = False for tool_name, tool in remote_tools.items(): if tool.task == task_name: HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool found = True break if not found: raise ValueError(f"{task_name} is not implemented on the Hub.") _tools_are_initialized = True def resolve_tools(code, toolbox, remote=False, cached_tools=None): if cached_tools is None: resolved_tools = BASE_PYTHON_TOOLS.copy() else: resolved_tools = cached_tools for name, tool in toolbox.items(): if name not in code or name in resolved_tools: continue if isinstance(tool, Tool): resolved_tools[name] = tool else: task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id 
_remote = remote and supports_remote(task_or_repo_id) resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) return resolved_tools def get_tool_creation_code(code, toolbox, remote=False): code_lines = ["from transformers import load_tool", ""] for name, tool in toolbox.items(): if name not in code or isinstance(tool, Tool): continue task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id line = f'{name} = load_tool("{task_or_repo_id}"' if remote: line += ", remote=True" line += ")" code_lines.append(line) return "\n".join(code_lines) + "\n" def clean_code_for_chat(result): lines = result.split("\n") idx = 0 while idx < len(lines) and not lines[idx].lstrip().startswith("```"): idx += 1 explanation = "\n".join(lines[:idx]).strip() if idx == len(lines): return explanation, None idx += 1 start_idx = idx while not lines[idx].lstrip().startswith("```"): idx += 1 code = "\n".join(lines[start_idx:idx]).strip() return explanation, code def clean_code_for_run(result): result = f"I will use the following {result}" explanation, code = result.split("Answer:") explanation = explanation.strip() code = code.strip() code_lines = code.split("\n") if code_lines[0] in ["```", "```py", "```python"]: code_lines = code_lines[1:] if code_lines[-1] == "```": code_lines = code_lines[:-1] code = "\n".join(code_lines) return explanation, code class Agent: """ Base class for all agents which contains the main API methods. Args: chat_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `chat` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `chat_prompt_template.txt` in this repo in this case. run_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `run` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `run_prompt_template.txt` in this repo in this case. additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): Any additional tools to include on top of the default ones. If you pass along a tool with the same name as one of the default tools, that default tool will be overridden. """ def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): _setup_default_tools() agent_name = self.__class__.__name__ self.chat_prompt_template = download_prompt(chat_prompt_template, agent_name, mode="chat") self.run_prompt_template = download_prompt(run_prompt_template, agent_name, mode="run") self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() self.log = print if additional_tools is not None: if isinstance(additional_tools, (list, tuple)): additional_tools = {t.name: t for t in additional_tools} elif not isinstance(additional_tools, dict): additional_tools = {additional_tools.name: additional_tools} replacements = {name: tool for name, tool in additional_tools.items() if name in HUGGINGFACE_DEFAULT_TOOLS} self._toolbox.update(additional_tools) if len(replacements) > 1: names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) logger.warning( f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." 
) elif len(replacements) == 1: name = list(replacements.keys())[0] logger.warning(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.") self.prepare_for_new_chat() @property def toolbox(self) -> Dict[str, Tool]: """Get all tool currently available to the agent""" return self._toolbox def format_prompt(self, task, chat_mode=False): description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]) if chat_mode: if self.chat_history is None: prompt = self.chat_prompt_template.replace("<<all_tools>>", description) else: prompt = self.chat_history prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task) else: prompt = self.run_prompt_template.replace("<<all_tools>>", description) prompt = prompt.replace("<<prompt>>", task) return prompt def set_stream(self, streamer): """ Set the function use to stream results (which is `print` by default). Args: streamer (`callable`): The function to call when streaming results from the LLM. """ self.log = streamer def chat(self, task, *, return_code=False, remote=False, **kwargs): """ Sends a new request to the agent in a chat. Will use the previous ones in its history. Args: task (`str`): The task to perform return_code (`bool`, *optional*, defaults to `False`): Whether to just return code and not evaluate it. remote (`bool`, *optional*, defaults to `False`): Whether or not to use remote tools (inference endpoints) instead of local ones. kwargs (additional keyword arguments, *optional*): Any keyword argument to send to the agent when evaluating the code. Example: ```py from transformers import HfAgent agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") agent.chat("Draw me a picture of rivers and lakes") agent.chat("Transform the picture so that there is a rock in there") ``` """ prompt = self.format_prompt(task, chat_mode=True) result = self.generate_one(prompt, stop=["Human:", "====="]) self.chat_history = prompt + result.strip() + "\n" explanation, code = clean_code_for_chat(result) self.log(f"==Explanation from the agent==\n{explanation}") if code is not None: self.log(f"\n\n==Code generated by the agent==\n{code}") if not return_code: self.log("\n\n==Result==") self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) self.chat_state.update(kwargs) return evaluate(code, self.cached_tools, self.chat_state, chat_mode=True) else: tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) return f"{tool_code}\n{code}" def prepare_for_new_chat(self): """ Clears the history of prior calls to [`~Agent.chat`]. """ self.chat_history = None self.chat_state = {} self.cached_tools = None def clean_code_for_run(self, result): """ Override this method if you want to change the way the code is cleaned for the `run` method. """ return clean_code_for_run(result) def run(self, task, *, return_code=False, remote=False, **kwargs): """ Sends a request to the agent. Args: task (`str`): The task to perform return_code (`bool`, *optional*, defaults to `False`): Whether to just return code and not evaluate it. remote (`bool`, *optional*, defaults to `False`): Whether or not to use remote tools (inference endpoints) instead of local ones. kwargs (additional keyword arguments, *optional*): Any keyword argument to send to the agent when evaluating the code. 
Example: ```py from transformers import HfAgent agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") agent.run("Draw me a picture of rivers and lakes") ``` """ prompt = self.format_prompt(task) result = self.generate_one(prompt, stop=["Task:"]) explanation, code = self.clean_code_for_run(result) self.log(f"==Explanation from the agent==\n{explanation}") self.log(f"\n\n==Code generated by the agent==\n{code}") if not return_code: self.log("\n\n==Result==") self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) return evaluate(code, self.cached_tools, state=kwargs.copy()) else: tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) return f"{tool_code}\n{code}" def generate_one(self, prompt, stop): # This is the method to implement in your custom agent. raise NotImplementedError def generate_many(self, prompts, stop): # Override if you have a way to do batch generation faster than one by one return [self.generate_one(prompt, stop) for prompt in prompts] class OpenAiAgent(Agent): """ Agent that uses the openai API to generate code. <Tip warning={true}> The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. </Tip> Args: model (`str`, *optional*, defaults to `"text-davinci-003"`): The name of the OpenAI model to use. api_key (`str`, *optional*): The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`. chat_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `chat` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `chat_prompt_template.txt` in this repo in this case. run_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `run` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `run_prompt_template.txt` in this repo in this case. additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): Any additional tools to include on top of the default ones. If you pass along a tool with the same name as one of the default tools, that default tool will be overridden. Example: ```py from transformers import OpenAiAgent agent = OpenAiAgent(model="text-davinci-003", api_key=xxx) agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") ``` """ def __init__( self, model="text-davinci-003", api_key=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, ): if not is_openai_available(): raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") if api_key is None: api_key = os.environ.get("OPENAI_API_KEY", None) if api_key is None: raise ValueError( "You need an openai key to use `OpenAIAgent`. You can get one here: Get one here " "https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " "xxx." 
) else: openai.api_key = api_key self.model = model super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) def generate_many(self, prompts, stop): if "gpt" in self.model: return [self._chat_generate(prompt, stop) for prompt in prompts] else: return self._completion_generate(prompts, stop) def generate_one(self, prompt, stop): if "gpt" in self.model: return self._chat_generate(prompt, stop) else: return self._completion_generate([prompt], stop)[0] def _chat_generate(self, prompt, stop): result = openai.chat.completions.create( model=self.model, messages=[{"role": "user", "content": prompt}], temperature=0, stop=stop, ) return result.choices[0].message.content def _completion_generate(self, prompts, stop): result = openai.Completion.create( model=self.model, prompt=prompts, temperature=0, stop=stop, max_tokens=200, ) return [answer["text"] for answer in result["choices"]] class AzureOpenAiAgent(Agent): """ Agent that uses Azure OpenAI to generate code. See the [official documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI model on Azure <Tip warning={true}> The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. </Tip> Args: deployment_id (`str`): The name of the deployed Azure openAI model to use. api_key (`str`, *optional*): The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`. resource_name (`str`, *optional*): The name of your Azure OpenAI Resource. If unset, will look for the environment variable `"AZURE_OPENAI_RESOURCE_NAME"`. api_version (`str`, *optional*, default to `"2022-12-01"`): The API version to use for this agent. is_chat_mode (`bool`, *optional*): Whether you are using a completion model or a chat model (see note above, chat models won't be as efficient). Will default to `gpt` being in the `deployment_id` or not. chat_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `chat` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `chat_prompt_template.txt` in this repo in this case. run_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `run` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `run_prompt_template.txt` in this repo in this case. additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): Any additional tools to include on top of the default ones. If you pass along a tool with the same name as one of the default tools, that default tool will be overridden. 
Example: ```py from transformers import AzureOpenAiAgent agent = AzureAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy) agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") ``` """ def __init__( self, deployment_id, api_key=None, resource_name=None, api_version="2022-12-01", is_chat_model=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, ): if not is_openai_available(): raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") self.deployment_id = deployment_id openai.api_type = "azure" if api_key is None: api_key = os.environ.get("AZURE_OPENAI_API_KEY", None) if api_key is None: raise ValueError( "You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with " "`os.environ['AZURE_OPENAI_API_KEY'] = xxx." ) else: openai.api_key = api_key if resource_name is None: resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None) if resource_name is None: raise ValueError( "You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with " "`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx." ) else: openai.api_base = f"https://{resource_name}.openai.azure.com" openai.api_version = api_version if is_chat_model is None: is_chat_model = "gpt" in deployment_id.lower() self.is_chat_model = is_chat_model super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) def generate_many(self, prompts, stop): if self.is_chat_model: return [self._chat_generate(prompt, stop) for prompt in prompts] else: return self._completion_generate(prompts, stop) def generate_one(self, prompt, stop): if self.is_chat_model: return self._chat_generate(prompt, stop) else: return self._completion_generate([prompt], stop)[0] def _chat_generate(self, prompt, stop): result = openai.ChatCompletion.create( engine=self.deployment_id, messages=[{"role": "user", "content": prompt}], temperature=0, stop=stop, ) return result["choices"][0]["message"]["content"] def _completion_generate(self, prompts, stop): result = openai.Completion.create( engine=self.deployment_id, prompt=prompts, temperature=0, stop=stop, max_tokens=200, ) return [answer["text"] for answer in result["choices"]] class HfAgent(Agent): """ Agent that uses an inference endpoint to generate code. Args: url_endpoint (`str`): The name of the url endpoint to use. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). chat_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `chat` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `chat_prompt_template.txt` in this repo in this case. run_prompt_template (`str`, *optional*): Pass along your own prompt if you want to override the default template for the `run` method. Can be the actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named `run_prompt_template.txt` in this repo in this case. additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): Any additional tools to include on top of the default ones. If you pass along a tool with the same name as one of the default tools, that default tool will be overridden. 
    Example:

    ```py
    from transformers import HfAgent

    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
    ```
    """

    def __init__(
        self, url_endpoint, token=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
    ):
        self.url_endpoint = url_endpoint
        if token is None:
            self.token = f"Bearer {HfFolder().get_token()}"
        elif token.startswith("Bearer") or token.startswith("Basic"):
            self.token = token
        else:
            self.token = f"Bearer {token}"
        super().__init__(
            chat_prompt_template=chat_prompt_template,
            run_prompt_template=run_prompt_template,
            additional_tools=additional_tools,
        )

    def generate_one(self, prompt, stop):
        headers = {"Authorization": self.token}
        inputs = {
            "inputs": prompt,
            "parameters": {"max_new_tokens": 200, "return_full_text": False, "stop": stop},
        }

        response = requests.post(self.url_endpoint, json=inputs, headers=headers)
        if response.status_code == 429:
            logger.info("Getting rate-limited, waiting a tiny bit before trying again.")
            time.sleep(1)
            return self.generate_one(prompt, stop)
        elif response.status_code != 200:
            raise ValueError(f"Error {response.status_code}: {response.json()}")

        result = response.json()[0]["generated_text"]
        # Inference API returns the stop sequence
        for stop_seq in stop:
            if result.endswith(stop_seq):
                return result[: -len(stop_seq)]
        return result


class LocalAgent(Agent):
    """
    Agent that uses a local model and tokenizer to generate code.

    Args:
        model ([`PreTrainedModel`]):
            The model to use for the agent.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer to use for the agent.
        chat_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `chat_prompt_template.txt` in this repo in this case.
        run_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `run_prompt_template.txt` in this repo in this case.
        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
            one of the default tools, that default tool will be overridden.

    Example:

    ```py
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, LocalAgent

    checkpoint = "bigcode/starcoder"
    model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    agent = LocalAgent(model, tokenizer)
    agent.run("Draw me a picture of rivers and lakes.")
    ```
    """

    def __init__(self, model, tokenizer, chat_prompt_template=None, run_prompt_template=None, additional_tools=None):
        self.model = model
        self.tokenizer = tokenizer
        super().__init__(
            chat_prompt_template=chat_prompt_template,
            run_prompt_template=run_prompt_template,
            additional_tools=additional_tools,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """
        Convenience method to build a `LocalAgent` from a pretrained checkpoint.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The name of a repo on the Hub or a local path to a folder containing both model and tokenizer.
kwargs (`Dict[str, Any]`, *optional*): Keyword arguments passed along to [`~PreTrainedModel.from_pretrained`]. Example: ```py import torch from transformers import LocalAgent agent = LocalAgent.from_pretrained("bigcode/starcoder", device_map="auto", torch_dtype=torch.bfloat16) agent.run("Draw me a picture of rivers and lakes.") ``` """ model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(model, tokenizer) @property def _model_device(self): if hasattr(self.model, "hf_device_map"): return list(self.model.hf_device_map.values())[0] for param in self.model.parameters(): return param.device def generate_one(self, prompt, stop): encoded_inputs = self.tokenizer(prompt, return_tensors="pt").to(self._model_device) src_len = encoded_inputs["input_ids"].shape[1] stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop, self.tokenizer)]) outputs = self.model.generate( encoded_inputs["input_ids"], max_new_tokens=200, stopping_criteria=stopping_criteria ) result = self.tokenizer.decode(outputs[0].tolist()[src_len:]) # Inference API returns the stop sequence for stop_seq in stop: if result.endswith(stop_seq): result = result[: -len(stop_seq)] return result class StopSequenceCriteria(StoppingCriteria): """ This class can be used to stop generation whenever a sequence of tokens is encountered. Args: stop_sequences (`str` or `List[str]`): The sequence (or list of sequences) on which to stop execution. tokenizer: The tokenizer used to decode the model outputs. """ def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences)
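

# Illustrative sketch (not part of the upstream module): how `clean_code_for_run` above
# splits a raw LLM completion into an explanation and an executable snippet. The completion
# text below is a made-up example.
if __name__ == "__main__":
    fake_completion = (
        "tool to compute the sum.\n\n"
        "Answer:\n"
        "```py\n"
        "result = 2 + 2\n"
        "print(result)\n"
        "```"
    )
    explanation, code = clean_code_for_run(fake_completion)
    print(explanation)  # "I will use the following tool to compute the sum."
    print(code)  # "result = 2 + 2\nprint(result)"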
transformers/src/transformers/tools/agents.py/0
{ "file_path": "transformers/src/transformers/tools/agents.py", "repo_id": "transformers", "token_count": 12720 }
383
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Callbacks to use with the Trainer class and customize the training loop. """ import dataclasses import json from dataclasses import dataclass from typing import Dict, List, Optional, Union import numpy as np from tqdm.auto import tqdm from .trainer_utils import IntervalStrategy, has_length from .training_args import TrainingArguments from .utils import logging logger = logging.get_logger(__name__) @dataclass class TrainerState: """ A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): The number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`List[Dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. 
This will impact the way data will be logged in TensorBoard. """ epoch: Optional[float] = None global_step: int = 0 max_steps: int = 0 logging_steps: int = 500 eval_steps: int = 500 save_steps: int = 500 train_batch_size: int = None num_train_epochs: int = 0 num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: List[Dict[str, float]] = None best_metric: Optional[float] = None best_model_checkpoint: Optional[str] = None is_local_process_zero: bool = True is_world_process_zero: bool = True is_hyper_param_search: bool = False trial_name: str = None trial_params: Dict[str, Union[str, float, int, bool]] = None def __post_init__(self): if self.log_history is None: self.log_history = [] def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n" with open(json_path, "w", encoding="utf-8") as f: f.write(json_string) @classmethod def load_from_json(cls, json_path: str): """Create an instance from the content of `json_path`.""" with open(json_path, "r", encoding="utf-8") as f: text = f.read() return cls(**json.loads(text)) @dataclass class TrainerControl: """ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. """ should_training_stop: bool = False should_epoch_stop: bool = False should_save: bool = False should_evaluate: bool = False should_log: bool = False def _new_training(self): """Internal method that resets the variable for a new training.""" self.should_training_stop = False def _new_epoch(self): """Internal method that resets the variable for a new epoch.""" self.should_epoch_stop = False def _new_step(self): """Internal method that resets the variable for a new step.""" self.should_save = False self.should_evaluate = False self.should_log = False class TrainerCallback: # no-format """ A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. 
tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. eval_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. metrics (`Dict[str, float]`): The metrics computed by the last evaluation phase. Those are only accessible in the event `on_evaluate`. logs (`Dict[str, float]`): The values to log. Those are only accessible in the event `on_log`. The `control` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple [`~transformers.PrinterCallback`]. Example: ```python class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) ```""" def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of the initialization of the [`Trainer`]. """ pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of training. """ pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of training. """ pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of an epoch. """ pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an epoch. """ pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an substep during gradient accumulation. """ pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after an evaluation phase. """ pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs): """ Event called after a successful prediction. """ pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a checkpoint save. """ pass def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after logging the last logs. 
""" pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a prediction step. """ pass class CallbackHandler(TrainerCallback): """Internal class that just calls the list of callbacks in order.""" def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.tokenizer = tokenizer self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_dataloader = None self.eval_dataloader = None if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks): logger.warning( "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n" + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of" + "callbacks is\n:" + self.callback_list ) def add_callback(self, callback): cb = callback() if isinstance(callback, type) else callback cb_class = callback if isinstance(callback, type) else callback.__class__ if cb_class in [c.__class__ for c in self.callbacks]: logger.warning( f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current" + "list of callbacks is\n:" + self.callback_list ) self.callbacks.append(cb) def pop_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return cb else: for cb in self.callbacks: if cb == callback: self.callbacks.remove(cb) return cb def remove_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return else: self.callbacks.remove(callback) @property def callback_list(self): return "\n".join(cb.__class__.__name__ for cb in self.callbacks) def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_init_end", args, state, control) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_training_stop = False return self.call_event("on_train_begin", args, state, control) def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_train_end", args, state, control) def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_epoch_stop = False return self.call_event("on_epoch_begin", args, state, control) def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_epoch_end", args, state, control) def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_log = False control.should_evaluate = False control.should_save = False return self.call_event("on_step_begin", args, state, control) def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_substep_end", args, state, control) def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_step_end", args, state, control) def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): control.should_evaluate = False return self.call_event("on_evaluate", args, state, control, metrics=metrics) def on_predict(self, args: TrainingArguments, 
state: TrainerState, control: TrainerControl, metrics): return self.call_event("on_predict", args, state, control, metrics=metrics) def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_save = False return self.call_event("on_save", args, state, control) def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs): control.should_log = False return self.call_event("on_log", args, state, control, logs=logs) def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_prediction_step", args, state, control) def call_event(self, event, args, state, control, **kwargs): for callback in self.callbacks: result = getattr(callback, event)( args, state, control, model=self.model, tokenizer=self.tokenizer, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, eval_dataloader=self.eval_dataloader, **kwargs, ) # A Callback can skip the return of `control` if it doesn't change it. if result is not None: control = result return control class DefaultFlowCallback(TrainerCallback): """ A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints. """ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if state.global_step == 1 and args.logging_first_step: control.should_log = True if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0: control.should_log = True # Evaluate if ( args.evaluation_strategy == IntervalStrategy.STEPS and state.global_step % state.eval_steps == 0 and args.eval_delay <= state.global_step ): control.should_evaluate = True # Save if ( args.save_strategy == IntervalStrategy.STEPS and state.save_steps > 0 and state.global_step % state.save_steps == 0 ): control.should_save = True # End training if state.global_step >= state.max_steps: control.should_training_stop = True return control def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if args.logging_strategy == IntervalStrategy.EPOCH: control.should_log = True # Evaluate if args.evaluation_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch: control.should_evaluate = True # Save if args.save_strategy == IntervalStrategy.EPOCH: control.should_save = True return control class ProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation. 
""" def __init__(self): self.training_bar = None self.prediction_bar = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm( total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True ) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: _ = logs.pop("total_flos", None) self.training_bar.write(str(logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.close() self.training_bar = None class PrinterCallback(TrainerCallback): """ A bare [`TrainerCallback`] that just prints the logs. """ def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) class EarlyStoppingCallback(TrainerCallback): """ A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. """ def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. 
self.early_stopping_patience_counter = 0 def check_metric_value(self, args, state, control, metric_value): # best_metric is set by code for load_best_model operator = np.greater if args.greater_is_better else np.less if state.best_metric is None or ( operator(metric_value, state.best_metric) and abs(metric_value - state.best_metric) > self.early_stopping_threshold ): self.early_stopping_patience_counter = 0 else: self.early_stopping_patience_counter += 1 def on_train_begin(self, args, state, control, **kwargs): assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True" assert ( args.metric_for_best_model is not None ), "EarlyStoppingCallback requires metric_for_best_model is defined" assert ( args.evaluation_strategy != IntervalStrategy.NO ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch" def on_evaluate(self, args, state, control, metrics, **kwargs): metric_to_check = args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics.get(metric_to_check) if metric_value is None: logger.warning( f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping" " is disabled" ) return self.check_metric_value(args, state, control, metric_value) if self.early_stopping_patience_counter >= self.early_stopping_patience: control.should_training_stop = True
transformers/src/transformers/trainer_callback.py/0
{ "file_path": "transformers/src/transformers/trainer_callback.py", "repo_id": "transformers", "token_count": 9727 }
384
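The callback machinery above (`TrainerCallback`, `TrainerControl`, `CallbackHandler`) is driven from user code by subclassing `TrainerCallback` and flipping the `control` switches. The sketch below is illustrative only: the time budget value is arbitrary, and the commented registration line assumes an already-constructed `Trainer` instance named `trainer`.

```python
import time

from transformers import TrainerCallback, TrainerControl, TrainerState, TrainingArguments


class TimeBudgetCallback(TrainerCallback):
    """Request a save and stop training once a wall-clock budget (in seconds) is exhausted."""

    def __init__(self, budget_seconds: float):
        self.budget_seconds = budget_seconds
        self.start_time = None

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        self.start_time = time.time()

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        if time.time() - self.start_time > self.budget_seconds:
            # These flags are read by the training loop and reset as documented in `TrainerControl`.
            control.should_save = True
            control.should_training_stop = True
        return control  # returning the (possibly modified) control object is optional


# Illustrative registration on an existing `Trainer` instance (hypothetical `trainer` variable):
# trainer.add_callback(TimeBudgetCallback(budget_seconds=3600))
```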
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class Pop2PianoFeatureExtractor(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"]) class Pop2PianoTokenizer(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"])
transformers/src/transformers/utils/dummy_music_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_music_objects.py", "repo_id": "transformers", "token_count": 169 }
385
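These dummy classes exist so the `Pop2Piano*` names can always be imported; constructing one without the `music` extra installed is expected to fail with an informative error raised by `requires_backends`. A small, hedged illustration (the exact exception type comes from `requires_backends` and may vary by version):

```python
from transformers.utils import dummy_music_objects

try:
    dummy_music_objects.Pop2PianoFeatureExtractor()
except Exception as err:  # typically an ImportError explaining how to install the "music" dependencies
    print(f"music backend unavailable: {err}")
```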
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import ceil def assert_device_map(device_map, num_blocks): blocks = list(range(0, num_blocks)) device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] # Duplicate check duplicate_blocks = [] for i in device_map_blocks: if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: duplicate_blocks.append(i) # Missing blocks missing_blocks = [i for i in blocks if i not in device_map_blocks] extra_blocks = [i for i in device_map_blocks if i not in blocks] if len(duplicate_blocks) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(duplicate_blocks) ) if len(missing_blocks) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(missing_blocks) ) if len(extra_blocks) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(extra_blocks) ) def get_device_map(n_layers, devices): """Returns a dictionary of layers distributed evenly across all devices.""" layers = list(range(n_layers)) n_blocks = int(ceil(n_layers / len(devices))) layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] return dict(zip(devices, layers_list))
transformers/src/transformers/utils/model_parallel_utils.py/0
{ "file_path": "transformers/src/transformers/utils/model_parallel_utils.py", "repo_id": "transformers", "token_count": 778 }
386
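A quick sketch of what the two helpers above produce and check, using a hypothetical 12-layer model split over four devices:

```python
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

# Hypothetical: 12 layers spread evenly over devices 0-3 (ceil(12 / 4) = 3 layers per device).
device_map = get_device_map(n_layers=12, devices=[0, 1, 2, 3])
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}

# Raises ValueError if any block is duplicated, missing, or does not exist in the model.
assert_device_map(device_map, num_blocks=12)
```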
{ "modelname": "{{cookiecutter.modelname}}", "uppercase_modelname": "{{cookiecutter.uppercase_modelname}}", "lowercase_modelname": "{{cookiecutter.lowercase_modelname}}", "camelcase_modelname": "{{cookiecutter.camelcase_modelname}}", "authors": "{{cookiecutter.authors}}", "checkpoint_identifier": "{{cookiecutter.checkpoint_identifier}}", "tokenizer_type": "{{cookiecutter.tokenizer_type}}", "generate_tensorflow_pytorch_and_flax": "{{cookiecutter.generate_tensorflow_pytorch_and_flax}}", "is_encoder_decoder_model": "{{cookiecutter.is_encoder_decoder_model}}" }
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration.json/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration.json", "repo_id": "transformers", "token_count": 218 }
387
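The `{{cookiecutter.*}}` placeholders above are filled in from user answers; the next record shows one filled-in test fixture. The snippet below only sketches that substitution with plain string replacement — it is not how the real cookiecutter tool is invoked, the context values are hypothetical (mirroring the fixture that follows), and the local `configuration.json` path is assumed.

```python
import json

# Hypothetical answers, mirroring the Flax test fixture in the next record.
context = {
    "modelname": "TemplateFLAX",
    "uppercase_modelname": "TEMPLATE_FLAX",
    "lowercase_modelname": "template_flax",
    "camelcase_modelname": "TemplateFlax",
    "authors": "The HuggingFace Team",
    "checkpoint_identifier": "brand-new-bert-base-cased",
    "tokenizer_type": "Based on BERT",
    "generate_tensorflow_pytorch_and_flax": "Flax",
    "is_encoder_decoder_model": "False",
}

with open("configuration.json", encoding="utf-8") as f:  # assumed path to the template above
    template = f.read()

for key, value in context.items():
    template = template.replace("{{cookiecutter." + key + "}}", value)

print(json.loads(template))
```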
{ "modelname": "TemplateFLAX", "uppercase_modelname": "TEMPLATE_FLAX", "lowercase_modelname": "template_flax", "camelcase_modelname": "TemplateFlax", "authors": "The HuggingFace Team", "checkpoint_identifier": "brand-new-bert-base-cased", "tokenizer_type": "Based on BERT", "generate_tensorflow_pytorch_and_flax": "Flax", "is_encoder_decoder_model": "False" }
transformers/templates/adding_a_new_model/tests/flax-encoder-bert-tokenizer.json/0
{ "file_path": "transformers/templates/adding_a_new_model/tests/flax-encoder-bert-tokenizer.json", "repo_id": "transformers", "token_count": 150 }
388
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import subprocess from os.path import dirname from parameterized import parameterized from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_gpu_count, get_tests_dir, require_deepspeed, require_torch_gpu, slow, ) from transformers.trainer_utils import set_seed if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer, ) set_seed(42) FIXTURE_DIRECTORY = get_tests_dir("fixtures") ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir())) DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__)) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "google-t5/t5-small" # *** Working Models *** ALBERT_TINY = "hf-internal-testing/tiny-albert" BART_TINY = "sshleifer/bart-tiny-random" BERT_TINY = "hf-internal-testing/tiny-bert" BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus" BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird" BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot" BLOOM_TINY = "bigscience/bigscience-small-testing" DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta" DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2" DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased" ELECTRA_TINY = "hf-internal-testing/tiny-electra" FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert" FSMT_TINY = "stas/tiny-wmt19-en-de" FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel" GPT2_TINY = "sshleifer/tiny-gpt2" GPTJ_TINY = "hf-internal-testing/tiny-random-gptj" GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo" LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm" LED_TINY = "hf-internal-testing/tiny-random-led" LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer" M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable MARIAN_TINY = "sshleifer/tiny-marian-en-de" MBART_TINY = "sshleifer/tiny-mbart" MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert" MPNET_TINY = "hf-internal-testing/tiny-random-mpnet" PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random" PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet" ROBERTA_TINY = "sshleifer/tiny-distilroberta-base" SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert" T5_TINY = "patrickvonplaten/t5-tiny-random" T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1" VIT_TINY = "hf-internal-testing/tiny-random-vit" XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta" XLNET_TINY = "sshleifer/tiny-xlnet-base-cased" # *** To Fix *** # *** tiny model issues *** # missing model files: MT5_TINY = "hf-internal-testing/tiny-random-mt5" CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert" OPENAI_GPT_TINY = "hf-internal-testing/tiny-random-openai-gpt" # missing tokenizer files 
CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert" LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2" HUBERT_TINY = "hf-internal-testing/tiny-random-hubert" # issues with tokenizer CTRL_TINY = "hf-internal-testing/tiny-random-ctrl" TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as Salesforce/ctrl # other issues with tiny models IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas # *** Lacking official examples to test with *** # or not working with examples DPR_TINY = "hf-internal-testing/tiny-random-dpr" # - "dpr" examples/research_projects/rag-end2end-retriever/ RAG_TINY = "hf-internal-testing/tiny-random-rag" # - "rag" research_projects LUKE_TINY = "" # - "luke" Entities classes - no plan to make such example LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert" # - "lxmert" doesn't work with run_qa.py CLIP_TINY = "hf-internal-testing/tiny-random-clip" # - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text" # - "speech_to_text", nothing under pytorch examples # *** Reactive mode *** # models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem TAPAS_TINY = "hf-internal-testing/tiny-random-tapas" # additional notes on tapas # 1. "Table must be of type pd.DataFrame" failure # TODO: new models to add: # def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, get_gpu_count()) if distributed else 1 master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() def make_task_cmds(): data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples" data_dir_wmt = f"{data_dir_samples}/wmt_en_ro" data_dir_xsum = f"{data_dir_samples}/xsum" args_main = """ --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) # but need a tiny model for each # # should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc. 
tasks2models = { "trans": [ "bart", "fsmt", "m2m_100", "marian", "mbart", "t5", "t5_v1", # "mt5", missing model files ], "sum": [ "pegasus", ], "clm": [ "big_bird", "bigbird_pegasus", "blenderbot", "bloom", "gpt2", "gpt_neo", "gptj", "xlm-roberta", "prophetnet", # "camembert", missing model files ], "mlm": [ "albert", "deberta", "deberta-v2", "distilbert", "electra", "flaubert", "funnel", "layoutlm", # "reformer", # multiple issues with either mlm/qa/clas ], "qa": [ "led", "longformer", "mobilebert", "mpnet", "roberta", "squeezebert", # "convbert", # missing tokenizer files # "layoutlmv2", missing model files ], "clas": [ "bert", "xlnet", # "hubert", # missing tokenizer files # "ibert", # multiple issues with either mlm/qa/clas # "transfo-xl", # tokenizer issues as Salesforce/ctrl # "Salesforce/ctrl", # tokenizer issues # "openai-community/openai-gpt", missing model files # "tapas", multiple issues ], "img_clas": [ "vit", ], } scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch" tasks = { "trans": f""" {scripts_dir}/translation/run_translation.py --train_file {data_dir_wmt}/train.json --source_lang en --target_lang ro """, "sum": f""" {scripts_dir}/summarization/run_summarization.py --train_file {data_dir_xsum}/sample.json --max_source_length 12 --max_target_length 12 --lang en """, "clm": f""" {scripts_dir}/language-modeling/run_clm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt --block_size 8 """, "mlm": f""" {scripts_dir}/language-modeling/run_mlm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt """, "qa": f""" {scripts_dir}/question-answering/run_qa.py --train_file {data_dir_samples}/SQUAD/sample.json """, "clas": f""" {scripts_dir}/text-classification/run_glue.py --train_file {data_dir_samples}/MRPC/train.csv --max_seq_length 12 --task_name MRPC """, "img_clas": f""" {scripts_dir}/image-classification/run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --remove_unused_columns False --max_steps 10 --image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json """, } launcher = get_launcher(distributed=True) cmds = {} for task, args in tasks.items(): args = args.split() for model in tasks2models[task]: model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"] args_model = f"--model_name_or_path {model_name}".split() cmds[f"{task}_{model}"] = launcher + args + args_model + args_main # # generation special case # if task == "gen": # launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split() # args_model += f"--model_type {model}".split() # cmds[f"{task}_{model}"] = launcher + args + args_model # else: return cmds task_cmds = make_task_cmds() ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] # future preparation: # for now test just fp16, as these tests are quite slow # FP16 = "fp16" # BF16 = "bf16" # # dtypes = [FP16] # so just hardcoding --fp16 for now # if is_torch_bf16_gpu_available(): # dtypes += [BF16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, task_cmds.keys())) @slow @require_deepspeed @require_torch_gpu class TestDeepSpeedModelZoo(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" def 
get_task_cmd(self, task, stage): # return a ready to run train cmd if task not in task_cmds: raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}") cmd = task_cmds[task] args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() output_dir = self.get_auto_remove_tmp_dir() args_out = f"--output_dir {output_dir}".split() cmd += args_ds + args_out return cmd, output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_zero_to_fp32(self, stage, task): # testing the ability to do a run followed by recovery of full fp32 weights cmd, output_dir = self.get_task_cmd(task, stage) # 1. generate the checkpoint cmd += "--save_steps 1".split() # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die execute_subprocess_async(cmd, env=self.get_env()) # 2. test that the fp32 weights get reconsolidated chkpt_dir = f"{output_dir}/checkpoint-1" recovered_model_path = f"{chkpt_dir}/out.bin" cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}" # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die subprocess.check_call(cmd, shell=True) assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found" # possibly could also test that the resulting saved model is usable but given that we use # random models we won't know if it's any good
transformers/tests/deepspeed/test_model_zoo.py/0
{ "file_path": "transformers/tests/deepspeed/test_model_zoo.py", "repo_id": "transformers", "token_count": 5689 }
389
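For orientation, this is roughly the command line the harness above assembles for one `(task, model)` pair, assuming two visible GPUs, the default master port, and illustrative paths for the fixtures, DeepSpeed config, and output directory:

```python
# Sketch of one generated command (task="clm", model="gpt2", stage="zero2"); paths are illustrative.
launcher = "deepspeed --num_nodes 1 --num_gpus 2 --master_port 10999".split()

script_and_data = (
    "examples/pytorch/language-modeling/run_clm.py"
    " --train_file tests/fixtures/sample_text.txt --block_size 8"
).split()

model_args = "--model_name_or_path sshleifer/tiny-gpt2".split()

common_args = (
    "--do_train --max_train_samples 4 --per_device_train_batch_size 2"
    " --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir"
).split()

extra_args = "--deepspeed tests/deepspeed/ds_config_zero2.json --output_dir /tmp/clm_gpt2 --save_steps 1".split()

cmd = launcher + script_and_data + model_args + common_args + extra_args
print(" ".join(cmd))
```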
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): # check too many eos tokens beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) # check all batches are done beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) # beam scorer should be done self.parent.assertTrue(beam_scorer.is_done) # check beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first 
next_scores[0, 0] = 0.0 beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = 
ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) 
output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 # for testing finalize, we do want to have fulfilled constraints stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, 
-1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
transformers/tests/generation/test_beam_search.py/0
{ "file_path": "transformers/tests/generation/test_beam_search.py", "repo_id": "transformers", "token_count": 11152 }
390
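Outside the test harness, the same `BeamSearchScorer` API can be exercised directly. The snippet below is a minimal sketch with toy shapes and random scores, mirroring the tensors `prepare_inputs` builds above; it is not a full generation loop.

```python
import torch

from transformers.generation import BeamSearchScorer

batch_size, num_beams, seq_len, vocab_size = 2, 3, 5, 20  # toy sizes

scorer = BeamSearchScorer(batch_size=batch_size, num_beams=num_beams, device="cpu")

input_ids = torch.randint(vocab_size, (batch_size * num_beams, seq_len))
# 2 * num_beams candidate continuations per batch entry, with scores sorted in descending order.
next_tokens = torch.randint(vocab_size, (batch_size, 2 * num_beams))
next_indices = torch.randint(num_beams, (batch_size, 2 * num_beams))
next_scores, _ = (-torch.rand(batch_size, 2 * num_beams)).sort(descending=True)

outputs = scorer.process(input_ids, next_scores, next_tokens, next_indices, eos_token_id=vocab_size + 1)
print(outputs["next_beam_scores"].shape, outputs["next_beam_tokens"].shape)  # both (batch_size * num_beams,)
```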
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPT2Config, T5Config, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPT2LMHeadModel, TFRobertaForMaskedLM, TFT5ForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPT2LMHeadModel, RobertaForMaskedLM, T5ForConditionalGeneration, ) @is_pt_tf_cross_test class TFPTAutoModelTest(unittest.TestCase): @slow def test_model_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModel.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertModel) model = AutoModel.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertModel) @slow def test_model_for_pretraining_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForPreTraining) model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForPreTraining) @slow def test_model_for_causal_lm(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True) model, loading_info = 
TFAutoModelForCausalLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFGPT2LMHeadModel) model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForCausalLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, GPT2LMHeadModel) @slow def test_lmhead_model_from_pretrained(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_masked_lm(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True) model, loading_info = TFAutoModelForMaskedLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForMaskedLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True) model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFT5ForConditionalGeneration) model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForSeq2SeqLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, T5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForSequenceClassification) model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) 
self.assertIsInstance(model, TFBertForQuestionAnswering) model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForQuestionAnswering) def test_from_pretrained_identifier(self): model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True) self.assertIsInstance(model, TFBertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True) self.assertIsInstance(model, BertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_identifier_from_model_type(self): model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True) self.assertIsInstance(model, TFRobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True) self.assertIsInstance(model, RobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410)
transformers/tests/models/auto/test_modeling_tf_pytorch.py/0
{ "file_path": "transformers/tests/models/auto/test_modeling_tf_pytorch.py", "repo_id": "transformers", "token_count": 4463 }
391
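The cross-framework loading these tests rely on can be reproduced in a couple of lines; this assumes both PyTorch and TensorFlow are installed and downloads the checkpoint on first use.

```python
from transformers import AutoModel, TFAutoModel

# Build TF weights from the PyTorch checkpoint, and PyTorch weights from the TF checkpoint.
tf_model = TFAutoModel.from_pretrained("google-bert/bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("google-bert/bert-base-uncased", from_tf=True)

print(type(tf_model).__name__, type(pt_model).__name__)  # TFBertModel BertModel
```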
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "vinai/bartpho-syllable" tokenizer_class = BartphoTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() vocab = ["▁This", "▁is", "▁a", "▁t", "est"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"unk_token": "<unk>"} self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n") tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "This is a là test" output_text = "This is a<unk><unk> test" return input_text, output_text def test_full_tokenizer(self): tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map) text = "This is a là test" bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
transformers/tests/models/bartpho/test_tokenization_bartpho.py/0
{ "file_path": "transformers/tests/models/bartpho/test_tokenization_bartpho.py", "repo_id": "transformers", "token_count": 1082 }
392
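A minimal usage sketch for the tokenizer covered above, assuming the released `vinai/bartpho-syllable` checkpoint named in `from_pretrained_id` and an installed `sentencepiece` package; it is not part of the record.

# Minimal sketch: load the BartPho syllable tokenizer and tokenize a sentence.
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")

tokens = tokenizer.tokenize("This is a là test")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens, ids)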
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class TFBlenderbotModelTester: config_cls = BlenderbotConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = 
inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def setUp(self): self.model_tester = TFBlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class 
TFBlenderbot400MIntegrationTests(unittest.TestCase): src_text = ["My friends are cool but they eat too many carbs."] model_name = "facebook/blenderbot-400M-distill" @cached_property def tokenizer(self): return BlenderbotTokenizer.from_pretrained(self.model_name) @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
transformers/tests/models/blenderbot/test_modeling_tf_blenderbot.py/0
{ "file_path": "transformers/tests/models/blenderbot/test_modeling_tf_blenderbot.py", "repo_id": "transformers", "token_count": 3935 }
393
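The integration test in the record above reduces to the following generation sketch (same checkpoint, tokenizer, and calls, minus the expected-output assertion); it requires TensorFlow and is illustrative rather than part of the record.

# Minimal sketch of the TF Blenderbot generation path exercised by the integration test above.
from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

model_inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated_ids = model.generate(model_inputs.input_ids)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])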
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class CLIPProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = CLIPProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer) self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor) self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor) def test_save_load_pretrained_additional_features(self): processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = CLIPProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), 
["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/clip/test_processor_clip.py/0
{ "file_path": "transformers/tests/models/clip/test_processor_clip.py", "repo_id": "transformers", "token_count": 3365 }
394
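A minimal sketch of what the processor tests above verify: `CLIPProcessor` wraps a tokenizer and an image processor behind a single call. The checkpoint name is an assumption, not taken from the record; any CLIP checkpoint with both components should behave the same way.

# Minimal sketch: one CLIPProcessor call returns both text and image features.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")  # assumed checkpoint

image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text=["lower newer"], images=[image], return_tensors="np")

# Keys combine the tokenizer and image-processor outputs.
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']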
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Cohere model. """ import unittest from parameterized import parameterized from transformers import CohereConfig, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_torch, require_torch_multi_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, CohereForCausalLM, CohereModel # Copied from transformers.tests.models.llama.LlamaModelTester with Llama->Cohere class CohereModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels # Ignore copy def 
get_config(self): return CohereConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, eos_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = CohereModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = CohereModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = CohereForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = CohereForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), 
output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CohereModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CohereModel, CohereForCausalLM) if is_torch_available() else () all_generative_model_classes = (CohereForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": CohereModel, "text-generation": CohereForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = ( False # FIXME @michaelbenayoun or @fxmarty from https://github.com/huggingface/transformers/pull/29753 ) # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = CohereModelTester(self) self.config_tester = ConfigTester(self, config_class=CohereConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("TODO @gante fix this for Cohere") @parameterized.expand([(1, False), (1, True), (4, False)]) def test_new_cache_format(self, num_beams, do_sample): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @require_bitsandbytes @require_torch_sdpa @require_torch_multi_gpu @slow def test_eager_matches_sdpa_generate(self): """ Overwritting the common test as the test is flaky on tiny models """ max_new_tokens = 30 model_id = "CohereForAI/c4ai-command-r-v01-4bit" tokenizer = AutoTokenizer.from_pretrained(model_id) model_sdpa = CohereForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto" ) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = CohereForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, attn_implementation="eager", device_map="auto" ) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") texts = [ "hi here's a longer context, getting longer and", "Hello this is a very long sentence my friend, very long for real", "Today I 
am in Paris and", ] for padding_side in ["left", "right"]: tokenizer.padding_side = padding_side tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(torch_device) res_eager = model_eager.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) res_sdpa = model_sdpa.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) with self.subTest(f"{padding_side}"): torch.testing.assert_close( res_eager, res_sdpa, msg=f"\n{tokenizer.batch_decode(res_eager)} \nvs\n{tokenizer.batch_decode(res_sdpa)}", ) @require_torch @slow class CohereIntegrationTest(unittest.TestCase): @require_torch_multi_gpu def test_batched_4bit(self): model_id = "CohereForAI/c4ai-command-r-v01-4bit" EXPECTED_TEXT = [ 'Hello today I am going to show you how to make a simple and easy card using the new stamp set called "Hello" from the Occasions catalog. This set is so versatile and can be used for many occasions. I used the new In', "Hi there, here we are again with another great collection of free fonts. This time we have gathered 10 free fonts that you can download and use in your designs. These fonts are free for personal and commercial use. So", ] model = CohereForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello today I am going to show you how to", "Hi there, here we are"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) self.assertEqual(tokenizer.batch_decode(output, skip_special_tokens=True), EXPECTED_TEXT) def test_batched_small_model_logits(self): # Since the model is very large, we created a random cohere model so that we can do a simple # logits check on it. model_id = "hf-internal-testing/cohere-random" EXPECTED_LOGITS = torch.Tensor( [ [[0.0000, 0.1866, -0.1997], [0.0000, -0.0736, 0.1785], [0.0000, -0.1965, -0.0569]], [[0.0000, -0.0302, 0.1488], [0.0000, -0.0402, 0.1351], [0.0000, -0.0341, 0.1116]], ] ).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) model = CohereForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16).to( torch_device ) tokenizer.pad_token = tokenizer.eos_token text = ["Hello today I am going to show you how to", "Hi there, here we are"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) with torch.no_grad(): output = model(**inputs) logits = output.logits self.assertTrue(torch.allclose(EXPECTED_LOGITS, logits[:, :3, :3], rtol=1e-3, atol=1e-3))
transformers/tests/models/cohere/test_modeling_cohere.py/0
{ "file_path": "transformers/tests/models/cohere/test_modeling_cohere.py", "repo_id": "transformers", "token_count": 7280 }
395
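The overridden SDPA test above compares attention implementations on a full-size checkpoint; this is a scaled-down sketch of the same idea on the tiny random model used by the logits test. The prompt and tolerances are illustrative assumptions, not values from the record.

# Minimal sketch: eager vs. SDPA attention should produce (near-)identical logits.
import torch
from transformers import AutoTokenizer, CohereForCausalLM

model_id = "hf-internal-testing/cohere-random"  # tiny random model from the logits test above
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token

model_eager = CohereForCausalLM.from_pretrained(model_id, attn_implementation="eager")
model_sdpa = CohereForCausalLM.from_pretrained(model_id, attn_implementation="sdpa")

inputs = tokenizer(["Hello today I am going to"], return_tensors="pt", padding=True)
with torch.no_grad():
    logits_eager = model_eager(**inputs).logits
    logits_sdpa = model_sdpa(**inputs).logits

torch.testing.assert_close(logits_eager, logits_sdpa, rtol=1e-3, atol=1e-3)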
# coding=utf-8
# Copyright 2018 HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers.models.cpm.tokenization_cpm import CpmTokenizer
from transformers.testing_utils import custom_tokenizers

from ..xlnet.test_modeling_xlnet import XLNetModelTest


@custom_tokenizers
class CpmTokenizationTest(XLNetModelTest):
    # There is no `CpmModel`
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def test_pre_tokenization(self):
        tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
        text = "Hugging Face大法好,谁用谁知道。"
        normalized_text = "Hugging Face大法好,谁用谁知道。<unk>"
        bpe_tokens = "▁Hu gg ing ▁ ▂ ▁F ace ▁大法 ▁好 ▁ , ▁谁 ▁用 ▁谁 ▁知 道 ▁ 。".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63, 8, 9, 440, 108, 440, 121, 90, 8, 12, 0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

        reconstructed_text = tokenizer.decode(input_bpe_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
transformers/tests/models/cpm/test_tokenization_cpm.py/0
{ "file_path": "transformers/tests/models/cpm/test_tokenization_cpm.py", "repo_id": "transformers", "token_count": 733 }
396
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow DeiT model. """ from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.modeling_tf_utils import keras from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class TFDeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = TFDeiTModel(config=config) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = TFDeiTForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = TFDeiTForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFDeiTForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFDeiTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for DeiTForImageClassificationWithTeacher model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def test_model_from_pretrained(self): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFDeiTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class DeiTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) 
expected_slice = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/deit/test_modeling_tf_deit.py/0
{ "file_path": "transformers/tests/models/deit/test_modeling_tf_deit.py", "repo_id": "transformers", "token_count": 4573 }
397
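The integration test in the record above boils down to this inference sketch (same checkpoint and image fixture path as in the record); it requires TensorFlow and Pillow and is illustrative only.

# Minimal sketch of the TF DeiT image-classification inference path checked above.
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

checkpoint = "facebook/deit-base-distilled-patch16-224"
image_processor = DeiTImageProcessor.from_pretrained(checkpoint)
model = TFDeiTForImageClassificationWithTeacher.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")

outputs = model(**inputs)
print(outputs.logits.shape)  # (1, 1000)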
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class TFDistilBertModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = False self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_distilbert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDistilBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_distilbert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
TFDistilBertForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_distilbert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDistilBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_distilbert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDistilBertForSequenceClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_distilbert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFDistilBertForMultipleChoice(config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_distilbert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDistilBertForTokenClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) pipeline_model_mapping = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) def 
test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]): model = TFDistilBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFDistilBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFDistilBertModel.from_pretrained("distilbert-base-uncased") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 768] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [0.19261885, -0.13732955, 0.4119799], [0.22150156, -0.07422661, 0.39037204], [0.22756018, -0.0896414, 0.3701467], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
transformers/tests/models/distilbert/test_modeling_tf_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_tf_distilbert.py", "repo_id": "transformers", "token_count": 4547 }
398
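The integration test in the record above corresponds to this short forward-pass sketch (same checkpoint and input ids, minus the expected-slice assertion); it is not part of the record.

# Minimal sketch of the bare TFDistilBertModel forward pass from the integration test above.
import tensorflow as tf
from transformers import TFDistilBertModel

model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])

output = model(input_ids)[0]
print(output.shape)  # (1, 6, 768)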
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import timeout_decorator # noqa from parameterized import parameterized from transformers import FSMTConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, shift_tokens_right, ) from transformers.pipelines import TranslationPipeline class FSMTModelTester: def __init__( self, parent, src_vocab_size=99, tgt_vocab_size=99, langs=["ru", "en"], batch_size=13, seq_length=7, is_training=False, use_labels=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, bos_token_id=0, pad_token_id=1, eos_token_id=2, ): self.parent = parent self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.langs = langs self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id torch.manual_seed(0) # hack needed for modeling_common tests - despite not really having this attribute in this model self.vocab_size = self.src_vocab_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp( 3, ) input_ids[:, -1] = 2 # Eos Token config = self.get_config() inputs_dict = prepare_fsmt_inputs_dict(config, input_ids) return config, inputs_dict def get_config(self): return FSMTConfig( vocab_size=self.src_vocab_size, # hack needed for common tests src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, 
dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"] inputs_dict["use_cache"] = False return config, inputs_dict def prepare_fsmt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (FSMTForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": FSMTForConditionalGeneration, "feature-extraction": FSMTModel, "summarization": FSMTForConditionalGeneration, "text2text-generation": FSMTForConditionalGeneration, "translation": FSMTForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = FSMTModelTester(self) self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } # XXX: hack to appease to all other models requiring `vocab_size` config["vocab_size"] = 99 # no such thing in FSMT self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) def test_config(self): self.config_tester.run_common_tests() # XXX: override test_model_common_attributes / different Embedding type def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding)) def test_initialization_more(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config) model.to(torch_device) model.eval() # test init # self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item()) def _check_var(module): """Check that we initialized various parameters from N(0, config.init_std).""" self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2) _check_var(model.encoder.embed_tokens) _check_var(model.encoder.layers[0].self_attn.k_proj) _check_var(model.encoder.layers[0].fc1) # XXX: different std for fairseq version of SinusoidalPositionalEmbedding # 
self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2) def test_advanced_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_cache = False inputs_dict["input_ids"][:, -2:] = config.pad_token_id decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, inputs_dict["input_ids"] ) model = FSMTModel(config).to(torch_device).eval() decoder_features_with_created_mask = model(**inputs_dict)[0] decoder_features_with_passed_mask = model( decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict )[0] _assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask) useless_mask = torch.zeros_like(decoder_attn_mask) decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0] self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions self.assertEqual( decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size), ) if decoder_attn_mask.min().item() < -1e3: # some tokens were masked self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item()) # Test different encoder attention masks decoder_features_with_long_encoder_mask = model( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long() )[0] _assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask) def test_save_load_missing_keys(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (inputs_dict["input_ids"], inputs_dict["attention_mask"]), f"{tmpdirname}/fsmt_test.onnx", export_params=True, opset_version=12, input_names=["input_ids", "attention_mask"], ) def test_ensure_weights_are_shared(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.tie_word_embeddings = True model = FSMTForConditionalGeneration(config) # FSMT shares three weights. # Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors. self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 1, ) config.tie_word_embeddings = False model = FSMTForConditionalGeneration(config) # FSMT shares three weights. # Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors. 
self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 2, ) @unittest.skip("can't be implemented for FSMT due to dual vocab.") def test_resize_tokens_embeddings(self): pass @unittest.skip("Passing inputs_embeds not implemented for FSMT.") def test_inputs_embeds(self): pass @unittest.skip("model weights aren't tied in FSMT.") def test_tie_model_weights(self): pass @unittest.skip("TODO: Decoder embeddings cannot be resized at the moment") def test_resize_embeddings_untied(self): pass @require_torch class FSMTHeadTests(unittest.TestCase): src_vocab_size = 99 tgt_vocab_size = 99 langs = ["ru", "en"] def _get_config(self): return FSMTConfig( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = self._get_config() return config, input_ids, batch_size def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], dtype=torch.long, device=torch_device) config = self._get_config() lm_model = FSMTForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 new_input_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = FSMTForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_prepare_fsmt_decoder_inputs(self): config, *_ = self._get_config_and_data() input_ids = _long_tensor(([4, 4, 2])) decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]]) causal_mask_dtype = torch.float32 ignore = torch.finfo(causal_mask_dtype).min decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids, causal_mask_dtype=causal_mask_dtype ) 
expected_causal_mask = torch.tensor( [[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because it's pad ).to(input_ids.device) self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size()) self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all()) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b aren't both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 pairs = [ ["en-ru"], ["ru-en"], ["en-de"], ["de-en"], ] @require_torch @require_sentencepiece @require_tokenizers class FSMTModelIntegrationTests(unittest.TestCase): tokenizers_cache = {} models_cache = {} default_mname = "facebook/wmt19-en-ru" @cached_property def default_tokenizer(self): return self.get_tokenizer(self.default_mname) @cached_property def default_model(self): return self.get_model(self.default_mname) def get_tokenizer(self, mname): if mname not in self.tokenizers_cache: self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname) return self.tokenizers_cache[mname] def get_model(self, mname): if mname not in self.models_cache: self.models_cache[mname] = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device) if torch_device == "cuda": self.models_cache[mname].half() return self.models_cache[mname] @slow def test_inference_no_head(self): tokenizer = self.default_tokenizer model = FSMTModel.from_pretrained(self.default_mname).to(torch_device) src_text = "My friend computer will translate this for me" input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"] input_ids = _long_tensor(input_ids).to(torch_device) inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size)) self.assertEqual(output.shape, expected_shape) # expected numbers were generated with the en-ru model, using just fairseq's model4.pt # may have to adjust if switched to a different checkpoint expected_slice = torch.tensor( [[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]] ).to(torch_device) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def translation_setup(self, pair): text = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } src, tgt = pair.split("-") print(f"Testing {src} -> {tgt}") mname = f"facebook/wmt19-{pair}" src_text = text[src] tgt_text = text[tgt] tokenizer = self.get_tokenizer(mname) model = self.get_model(mname) return tokenizer, model, src_text, tgt_text @parameterized.expand(pairs) @slow def test_translation_direct(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device) outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n" @parameterized.expand(pairs) @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) pipeline = TranslationPipeline(model, tokenizer,
framework="pt", device=torch_device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) @require_torch class TestSinusoidalPositionalEmbeddings(unittest.TestCase): padding_idx = 1 tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to( torch_device ) emb = emb1(input_ids) desired_weights = torch.tensor( [ [9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00], [1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00], ] ).to(torch_device) self.assertTrue( torch.allclose(emb[0], desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_odd_embed_dim(self): # odd embedding_dim is allowed SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to(torch_device) # odd num_embeddings is allowed SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device) @unittest.skip("different from marian (needs more research)") def test_positional_emb_weights_against_marian(self): desired_weights = torch.tensor( [ [0, 0, 0, 0, 0], [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374], [0.90929741, 0.93651021, 0.95829457, 0.97505713, 0.98720258], ] ) emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to( torch_device ) weights = emb1.weights.data[:3, :5] # XXX: only the 1st and 3rd lines match - this is testing against # verbatim copy of SinusoidalPositionalEmbedding from fairseq self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) # test that forward pass is just a lookup, there is no ignore padding logic input_ids = torch.tensor( [[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device ) no_cache_pad_zero = emb1(input_ids)[0] # XXX: only the 1st line matches the 3rd self.assertTrue( torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3) )
transformers/tests/models/fsmt/test_modeling_fsmt.py/0
{ "file_path": "transformers/tests/models/fsmt/test_modeling_fsmt.py", "repo_id": "transformers", "token_count": 11072 }
399
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch GPTNeoX model. """ import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class GPTNeoXModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return GPTNeoXConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_decoder(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() config.is_decoder = True return config, input_ids, input_mask, token_labels def create_and_check_model(self, config, input_ids, input_mask): model = GPTNeoXModel(config=config) model.to(torch_device) model.eval() _ = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder(self, config, input_ids, input_mask): config.add_cross_attention = True model = GPTNeoXModel(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): model = GPTNeoXForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): config.is_decoder = True model = GPTNeoXForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) output_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, 
past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_model_parallel = False test_head_masking = False def setUp(self): self.model_tester = GPTNeoXModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_model_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_model_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_model_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = GPTNeoXModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = GPTNeoXModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class GPTNeoXLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_gptneox(self): tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped") for checkpointing in [True, False]: model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped") if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure" output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, expected_output) def pythia_integration_test(self): model_name_or_path = "EleutherAI/pythia-70m" model = GPTNeoXForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16).to(torch_device) EXPECTED_LOGITS = torch.tensor([1069.0000, 228.7500, 1072.0000, 1072.0000, 1069.0000, 1068.0000, 1068.0000, 1071.0000, 1071.0000, 1071.0000, 1073.0000, 1070.0000, 1071.0000, 1075.0000, 1073.0000, 1075.0000, 1074.0000, 1069.0000, 1072.0000, 1071.0000, 1071.0000, 1071.0000, 1070.0000, 1069.0000, 1069.0000, 1069.0000, 1070.0000, 1075.0000, 1073.0000, 1074.0000]) # fmt: skip input_ids = [29, 93, 303, 64, 5478, 49651, 10394, 187, 34, 12939, 875] # alternative: tokenizer('<|im_start|>system\nA chat between') input_ids = torch.as_tensor(input_ids)[None].to(torch_device) outputs = model(input_ids)["logits"][:, -1][0, :30] self.assertTrue(torch.allclose(EXPECTED_LOGITS, outputs, atol=1e-5))
transformers/tests/models/gpt_neox/test_modeling_gpt_neox.py/0
{ "file_path": "transformers/tests/models/gpt_neox/test_modeling_gpt_neox.py", "repo_id": "transformers", "token_count": 7007 }
400
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch GroupViT model. """ import inspect import os import random import tempfile import unittest import numpy as np import requests from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig from transformers.testing_utils import is_pt_tf_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import GroupViTModel, GroupViTTextModel, GroupViTVisionModel from transformers.models.groupvit.modeling_groupvit import GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class GroupViTVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, depths=[6, 3, 3], num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.depths = depths self.num_hidden_layers = sum(depths) self.expected_num_hidden_layers = len(depths) + 1 self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 # no [CLS] token for GroupViT self.seq_length = num_patches def prepare_config_and_inputs(self): rng = random.Random(0) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng) config = self.get_config() return config, pixel_values def get_config(self): return GroupViTVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, depths=self.depths, num_group_tokens=self.num_group_tokens, num_output_groups=self.num_output_groups, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = GroupViTVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) 
self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GroupViTVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as GROUPVIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (GroupViTVisionModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = GroupViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="GroupViT does not use inputs_embeds") def test_inputs_embeds(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): import tensorflow as tf seed = 338 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens)) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), 
expected_num_attention_outputs) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(self_attentions), expected_num_attention_outputs) for i, self_attn in enumerate(self_attentions): if self_attn is None: continue self.assertListEqual( list(self_attentions[i].shape[-2:]), [ self.model_tester.num_output_groups[i], self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len, ], ) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass # override since the attention mask from GroupViT is not used to compute loss, thus no grad def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNone(attentions.grad) @slow def test_model_from_pretrained(self): for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
GroupViTVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class GroupViTTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): rng = random.Random(0) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return GroupViTTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = GroupViTTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GroupViTTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (GroupViTTextModel,) if is_torch_available() else () test_pruning = False test_head_masking = False def setUp(self): self.model_tester = GroupViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass 
@unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GroupViTTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class GroupViTModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = GroupViTTextModelTester(parent, **text_kwargs) self.vision_model_tester = GroupViTVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return GroupViTConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = GroupViTModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GroupViTModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = GroupViTModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="hidden_states are tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="input_embeds are tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="GroupViTModel does not have 
input/output embeddings") def test_model_common_attributes(self): pass # overwritten from parent as this equivalent test needs a specific `seed` and hard to get a good one! def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-5, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): import tensorflow as tf seed = 163 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() # override as the `logit_scale` parameter initilization is different for GROUPVIT def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # GROUPVIT needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # 
Save GroupViTConfig and check if we can load GroupViTVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = GroupViTVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save GroupViTConfig and check if we can load GroupViTTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = GroupViTTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GroupViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class GroupViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "nvidia/groupvit-gcc-yfcc" model = GroupViTModel.from_pretrained(model_name) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt" ) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[13.3523, 6.3629]]) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
transformers/tests/models/groupvit/test_modeling_groupvit.py/0
{ "file_path": "transformers/tests/models/groupvit/test_modeling_groupvit.py", "repo_id": "transformers", "token_count": 12796 }
401
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "allenai/led-base-16384" tokenizer_class = LEDTokenizer rust_tokenizer_class = LEDTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return LEDTokenizer.from_pretrained("allenai/led-base-16384") @cached_property def default_tokenizer_fast(self): return LEDTokenizerFast.from_pretrained("allenai/led-base-16384") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) 
self.assertNotIn("decoder_attention_mask", batch) @require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 5122)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, return_tensors="pt") targets = tokenizer(text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @require_torch def test_global_attention_mask(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: src_text = ["Summary of the text.", "Another summary."] expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] encoded_output = tokenizer(src_text, padding=False) encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]] outputs = tokenizer.pad(encoded_output) self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
transformers/tests/models/led/test_tokenization_led.py/0
{ "file_path": "transformers/tests/models/led/test_tokenization_led.py", "repo_id": "transformers", "token_count": 3812 }
402
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import LongformerConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerSelfAttention, ) class LongformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.attention_window = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window + 1` locations # (assuming no token with global attention, otherwise the last dimension of attentions # is x + self.attention_window + 1, where x is the number of tokens with global attention) self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = 
ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] output_without_mask = model(input_ids)["last_hidden_state"] self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4)) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() global_attention_mask = input_mask.clone() global_attention_mask[:, input_mask.shape[-1] // 2] = 0 global_attention_mask = global_attention_mask.to(torch_device) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, 
(self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, global_attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = LongformerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs global_attention_mask = torch.zeros_like(input_ids) global_attention_mask[:, -1] = 1 inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs # Replace sep_token_id by some random id input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item() # Make sure there are exactly three sep_token_id input_ids[:, -3:] = config.sep_token_id input_mask = torch.ones_like(input_ids) return config, input_ids, 
token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_torch class LongformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False # pruning is not supported test_torchscript = False all_model_classes = ( ( LongformerModel, LongformerForMaskedLM, LongformerForSequenceClassification, LongformerForQuestionAnswering, LongformerForTokenClassification, LongformerForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LongformerModel, "fill-mask": LongformerForMaskedLM, "question-answering": LongformerForQuestionAnswering, "text-classification": LongformerForSequenceClassification, "token-classification": LongformerForTokenClassification, "zero-shot": LongformerForSequenceClassification, } if is_torch_available() else {} ) # Need to use `0.6` instead of `0.5` for `test_disk_offload` model_split_percents = [0.6, 0.7, 0.9] # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = LongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # longformer cannot keep gradients in attentions or hidden states return @unittest.skip("LongFormer calculates global attn only when attn_mask has non-zero elements") def test_batching_equivalence(self): return @require_torch @require_sentencepiece @require_tokenizers class 
LongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return torch.tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=torch.float32, device=torch_device, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = hidden_states.reshape((1, 8, 4)) # set seq length = 8, hidden dim = 4 chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = chunked_hidden_states.shape[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1) # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000] self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3)) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, 0, 4:], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629] self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3)) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, -1, :3], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(hidden_states.shape, (1, 4, 8)) padding = (0, 0, 0, 1) padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding) self.assertEqual(padded_hidden_states.shape, (1, 8, 5)) expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32) self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6)) self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6)) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) # expected slices across chunk and seq length dim expected_slice_along_seq_length = torch.tensor( [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32 ) expected_slice_along_chunk = torch.tensor( [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32 ) self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3)) self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3)) self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4)) def test_mask_invalid_locations(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) 
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1) self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8) hid_states_2 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2) self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24) hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3] LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2) self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24) hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :] LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2) self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12) def test_layer_local_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[:, -2:] = -10000 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (1, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 1], torch.tensor( [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_global_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 2], torch.tensor( [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( output_hidden_states[1, -2], torch.tensor( [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_attn_probs(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = 
torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states, local_attentions, global_attentions = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=True, ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) # All tokens with global attention have weight 0 in local attentions. self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0)) self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0)) # The weight of all tokens with local attention must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( local_attentions[0, 0, 0, :], torch.tensor( [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( local_attentions[1, 0, 0, :], torch.tensor( [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) # All the global attention weights must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( global_attentions[0, 0, 1, :], torch.tensor( [0.2500, 0.2500, 0.2500, 0.2500], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( global_attentions[1, 0, 0, :], torch.tensor( [0.2497, 0.2500, 0.2499, 0.2504], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) @slow def test_inference_no_head(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world!' input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device) self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4)) self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4)) @slow def test_inference_no_head_long(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! 
' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = torch.tensor(74585.8594, device=torch_device) expected_output_mean = torch.tensor(0.0243, device=torch_device) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) @slow def test_inference_masked_lm_long(self): model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! ' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input input_ids = input_ids.to(torch_device) loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple() expected_loss = torch.tensor(0.0074, device=torch_device) expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device) expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device) self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
transformers/tests/models/longformer/test_modeling_longformer.py/0
{ "file_path": "transformers/tests/models/longformer/test_modeling_longformer.py", "repo_id": "transformers", "token_count": 15462 }
403
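The Longformer tests above repeatedly build a `global_attention_mask` alongside the usual attention mask, since Longformer mixes windowed local attention with a few globally attending positions. The sketch below shows that same pattern outside the test harness; the checkpoint name is the one used by the integration tests, everything else is illustrative.

# Sketch of the local/global attention pattern the Longformer tests verify.
import torch
from transformers import AutoTokenizer, LongformerModel

tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.eval()

inputs = tokenizer("Hello world!", return_tensors="pt")

# Local attention everywhere, global attention on the first (CLS-like) token,
# mirroring how the tests construct `global_attention_mask`.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

with torch.no_grad():
    outputs = model(**inputs, global_attention_mask=global_attention_mask)

print(outputs.last_hidden_state.shape)  # (batch_size, seq_len, hidden_size)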
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import math import unittest from transformers import MptConfig, is_torch_available from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPT_PRETRAINED_MODEL_ARCHIVE_LIST, AutoTokenizer, MptForCausalLM, MptForQuestionAnswering, MptForSequenceClassification, MptForTokenClassification, MptModel, ) @require_torch class MptModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=False, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=48, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return MptConfig.from_pretrained("mosaicml/mpt-7b") def prepare_config_and_inputs(self, gradient_checkpointing=False): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config(gradient_checkpointing=gradient_checkpointing) return (config, input_ids, input_mask, sequence_labels) def get_config(self, gradient_checkpointing=False): return MptConfig( vocab_size=self.vocab_size, seq_length=self.seq_length, hidden_size=self.hidden_size, n_layers=self.num_hidden_layers, 
n_heads=self.num_attention_heads, hidden_dropout=self.hidden_dropout_prob, attention_dropout=self.attention_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, num_labels=self.num_labels, gradient_checkpointing=gradient_checkpointing, dtype="float32", ) def create_and_check_mpt_model(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layers) def create_and_check_mpt_model_past(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True) outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids)) outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids)) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_mpt_model_attention_mask_past(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() 
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_mpt_model_past_large_inputs(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ) hidden_states_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ) hidden_states_from_past = output_from_past["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), hidden_states_from_past.shape[-1]).item() output_from_no_past_slice = hidden_states_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = hidden_states_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args): model = MptForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args): config.num_labels = self.num_labels model = MptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args): model = MptForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_question_answering_model(self, config, input_ids, input_mask, *args): model = MptForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, *args, gradient_checkpointing=False ): model = MptForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, 
self.vocab_size)) result.loss.backward() def create_and_check_mpt_weight_initialization(self, config, *args): model = MptModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, sequence_labels = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict class MptConfigTester(ConfigTester): def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs): super().__init__(parent, config_class, has_text_modality, common_properties, **kwargs) def test_attn_config_as_dict(self): config = self.config_class(**self.inputs_dict, attn_config={"attn_impl": "flash", "softmax_scale": None}) self.parent.assertTrue(config.attn_config.attn_impl == "flash") self.parent.assertTrue(config.attn_config.softmax_scale is None) def run_common_tests(self): self.test_attn_config_as_dict() return super().run_common_tests() @require_torch class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MptModel, MptForCausalLM, MptForSequenceClassification, MptForTokenClassification, MptForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (MptForCausalLM,) if is_torch_available() else () fx_compatible = False test_missing_keys = False test_pruning = False test_torchscript = False test_head_masking = False pipeline_model_mapping = ( { "feature-extraction": MptModel, "question-answering": MptForQuestionAnswering, "text-classification": MptForSequenceClassification, "text-generation": MptForCausalLM, "token-classification": MptForTokenClassification, "zero-shot": MptForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = MptModelTester(self) self.config_tester = MptConfigTester(self, config_class=MptConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_mpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_alibi_tensor(self): # test creation of alibi tensor when num heads is not a power of two config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs[0].n_heads = 6 self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past(*config_and_inputs) def test_mpt_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_attention_mask_past(*config_and_inputs) def test_mpt_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past_large_inputs(*config_and_inputs) def test_mpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_mpt_sequence_classification_model(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs) def test_mpt_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_token_classification_model(*config_and_inputs) def test_mpt_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_mpt_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_weight_initialization(*config_and_inputs) @unittest.skip("For backward compatibility the lm_head is not in the model's state dict on the Hub.") def test_model_weights_reload_no_missing_tied_weights(self): pass @slow def test_model_from_pretrained(self): for model_name in MPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MptModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu @require_bitsandbytes class MptIntegrationTests(unittest.TestCase): def test_generation_8k(self): model_id = "mosaicml/mpt-7b-8k" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_text = "Hello" expected_output = 'Hello, I\'m a new user of the forum. I have a question about the "Safety"' inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=20) decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True) self.assertEqual(decoded_output, expected_output) def test_generation(self): model_id = "mosaicml/mpt-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_text = "Hello" expected_output = ( "Hello and welcome to the first day of the new release countdown for the month of May!\nToday" ) inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=20) decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True) self.assertEqual(decoded_output, expected_output) def test_generation_batched(self): model_id = "mosaicml/mpt-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_texts = ["Hello my name is", "Today I am going at the gym and"] tokenizer.pad_token_id = tokenizer.eos_token_id tokenizer.padding_side = "left" inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(torch_device) expected_output = [ "Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for over", "Today I am going at the gym and then I am going to go to the grocery store and get some food. 
I am going to make", ] outputs = model.generate(**inputs, max_new_tokens=20) decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) for i, predicted_output in enumerate(decoded_outputs): self.assertEqual(predicted_output, expected_output[i]) def test_model_logits(self): model_id = "mosaicml/mpt-7b" # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device) outputs = model(dummy_input, output_hidden_states=True) expected_slice = torch.Tensor([-0.2539, -0.2178, -0.1953]).to(torch_device, torch.bfloat16) predicted_slice = outputs.hidden_states[-1][0, 0, :3] self.assertTrue(torch.allclose(expected_slice, predicted_slice, atol=1e-3, rtol=1e-3))
transformers/tests/models/mpt/test_modeling_mpt.py/0
{ "file_path": "transformers/tests/models/mpt/test_modeling_mpt.py", "repo_id": "transformers", "token_count": 9258 }
404
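A central check in the MPT tests above is that a cached forward pass (`past_key_values`) reproduces the uncached one. The sketch below mirrors that check with a tiny randomly initialized config so it runs without downloading weights; the config sizes are assumptions chosen only to keep the model small.

# Sketch of the past_key_values equivalence asserted in create_and_check_mpt_model_past,
# using a tiny random-init config (no checkpoint download needed).
import torch
from transformers import MptConfig, MptModel

config = MptConfig(vocab_size=99, d_model=32, n_heads=4, n_layers=2, max_seq_len=64)
model = MptModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
next_token = torch.randint(0, config.vocab_size, (1, 1))

with torch.no_grad():
    # First pass returns the key/value cache for all prefix tokens.
    past = model(input_ids, use_cache=True).past_key_values
    # Second pass feeds only the new token plus the cache.
    cached = model(next_token, past_key_values=past).last_hidden_state
    # Reference: run the full sequence without any cache.
    full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state

# The single cached step should match the last position of the full pass.
print(torch.allclose(cached[:, 0], full[:, -1], atol=1e-3))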
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch OneFormer model. """ import copy import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import OneFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OneFormerForUniversalSegmentation, OneFormerModel if is_vision_available(): from transformers import OneFormerProcessor if is_vision_available(): from PIL import Image def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init class OneFormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, vocab_size=99, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, sequence_length=77, n_ctx=4, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.vocab_size = vocab_size self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.sequence_length = sequence_length self.n_ctx = n_ctx def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) task_inputs = ( torch.randint(high=self.vocab_size, size=(self.batch_size, self.sequence_length)).to(torch_device).long() ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) text_inputs = ( torch.randint( high=self.vocab_size, size=(self.batch_size, self.num_queries - self.n_ctx, self.sequence_length) ) .to(torch_device) .long() ) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels def get_config(self): config = OneFormerConfig( text_encoder_vocab_size=self.vocab_size, hidden_size=self.hidden_dim, num_queries=self.num_queries, num_labels=self.num_labels, encoder_feedforward_dim=32, dim_feedforward=64, encoder_layers=2, decoder_layers=2, ) 
config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] config.backbone = None config.hidden_dim = self.hidden_dim config.mask_dim = self.hidden_dim config.conv_dim = self.hidden_dim config.text_encoder_width = self.hidden_dim config.task_seq_len = self.sequence_length config.max_seq_len = self.sequence_length config.text_encoder_context_length = self.sequence_length config.text_encoder_n_ctx = self.n_ctx return config def prepare_config_and_inputs_for_common(self): config, pixel_values, task_inputs, pixel_mask, _, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "task_inputs": task_inputs} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), config.encoder_layers) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers - 1) def create_and_check_oneformer_model( self, config, pixel_values, task_inputs, pixel_mask, output_hidden_states=False ): with torch.no_grad(): model = OneFormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask) output = model(pixel_values, task_inputs=task_inputs, output_hidden_states=True) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_object_queries.shape, (self.batch_size, self.num_queries, self.hidden_dim), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_hidden_states is not None) self.parent.assertTrue(output.encoder_hidden_states is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_oneformer_universal_segmentation_head_model( self, config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels ): model = OneFormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_hidden_states is not None) self.parent.assertTrue(result.pixel_decoder_hidden_states is not None) self.parent.assertTrue(result.encoder_hidden_states is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask) result = model(pixel_values, task_inputs) comm_check_on_output(result) config.is_training = True model = OneFormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model( 
pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels, text_inputs=text_inputs, ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([1])) @require_torch class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OneFormerModel, OneFormerForUniversalSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": OneFormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = OneFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=OneFormerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_oneformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=False) def test_oneformer_universal_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_oneformer_universal_segmentation_head_model(*config_and_inputs) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1:3] self.assertEqual(model_class.main_input_name, observed_main_input_name) @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_simple(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_hidden_state(self): pass @unittest.skip(reason="OneFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="OneFormer does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="OneFormer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="OneFormer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="OneFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "task_inputs"] self.assertListEqual(arg_names[:2], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in ["shi-labs/oneformer_ade20k_swin_tiny"]: model = OneFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) 
def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "task_inputs": torch.randint(high=self.model_tester.vocab_size, size=(2, 77), device=torch_device).long(), "text_inputs": torch.randint( high=self.model_tester.vocab_size, size=(2, 6, 77), device=torch_device ).long(), "mask_labels": torch.randn((2, 150, *size), device=torch_device), "class_labels": torch.zeros(2, 150, device=torch_device).long(), } config = self.model_tester.get_config() config.is_training = True model = OneFormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.contrastive_temperature = 1 configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_training(self): if not self.model_tester.is_training: return # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.is_training = True model = model_class(config) model.to(torch_device) model.train() loss = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True config.is_training = True model = model_class(config) model.to(torch_device) model.train() outputs = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_class_predictions = outputs.transformer_decoder_class_predictions transformer_decoder_class_predictions.retain_grad() transformer_decoder_mask_predictions = outputs.transformer_decoder_mask_predictions transformer_decoder_mask_predictions.retain_grad() attentions = outputs.attentions[0][0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) 
self.assertIsNotNone(transformer_decoder_class_predictions.grad) self.assertIsNotNone(transformer_decoder_mask_predictions.grad) self.assertIsNotNone(attentions.grad) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class OneFormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "shi-labs/oneformer_ade20k_swin_tiny" @cached_property def default_processor(self): return OneFormerProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = OneFormerModel.from_pretrained(self.model_checkpoints).to(torch_device) processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) task_inputs_shape = inputs["task_inputs"].shape # check size self.assertEqual(task_inputs_shape, (1, 77)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[0.2723, 0.8280, 0.6026], [1.2699, 1.1257, 1.1444], [1.1344, 0.6153, 0.4177]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_hidden_states[-1][0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[1.0581, 1.2276, 1.2003], [1.1903, 1.2925, 1.2862], [1.158, 1.2559, 1.3216]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_hidden_states[0][0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_class_predictions[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_universal_segmentation_head(self): model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, (inputs_shape[-1] + 2) // 4), ) expected_slice = [[[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]]] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1), ) expected_slice = torch.tensor( [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]] ).to(torch_device) self.assertTrue(torch.allclose(class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) processor 
= self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): dummy_model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) processor = self.default_processor processor.image_processor.num_text = dummy_model.config.num_queries - dummy_model.config.text_encoder_n_ctx dummy_model.config.is_training = True model = OneFormerForUniversalSegmentation(dummy_model.config).to(torch_device).eval() del dummy_model inputs = processor( [np.zeros((3, 512, 640)), np.zeros((3, 512, 640))], ["semantic", "semantic"], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["task_inputs"] = inputs["task_inputs"].to(torch_device) inputs["text_inputs"] = inputs["text_inputs"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
transformers/tests/models/oneformer/test_modeling_oneformer.py/0
{ "file_path": "transformers/tests/models/oneformer/test_modeling_oneformer.py", "repo_id": "transformers", "token_count": 10046 }
405
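For orientation alongside the OneFormer integration tests above, the sketch below runs the same kind of semantic-segmentation inference outside the test harness. It reuses the checkpoint and fixture image named in the tests and assumes torch and Pillow are installed; it is an illustrative example, not part of the test file.

import torch
from PIL import Image

from transformers import OneFormerForUniversalSegmentation, OneFormerProcessor

checkpoint = "shi-labs/oneformer_ade20k_swin_tiny"  # same checkpoint as the tests above
processor = OneFormerProcessor.from_pretrained(checkpoint)
model = OneFormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, ["semantic"], return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Per the tests: class logits are (batch, num_queries, num_labels + 1) and
# mask logits are (batch, num_queries, height // 4, width // 4).
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)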
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class OwlViTProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs) def get_rust_tokenizer(self, **kwargs): return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs) def get_image_processor(self, **kwargs): return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer) self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor) self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor) def test_save_load_pretrained_additional_features(self): processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False) processor = OwlViTProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", pad_token="!", do_normalize=False ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, OwlViTImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str, return_tensors="np") encoded_tok = tokenizer(input_str, return_tensors="np") for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist()) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = 
processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_with_text_list(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_text = ["cat", "nasa badge"] inputs = processor(text=input_text) seq_length = 16 self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (2, seq_length)) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_with_nested_text_list(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_texts = [["cat", "nasa badge"], ["person"]] inputs = processor(text=input_texts) seq_length = 16 batch_size = len(input_texts) num_max_text_queries = max([len(texts) for texts in input_texts]) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length)) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_case(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_texts = ["cat", "nasa badge"] inputs = processor(text=input_texts) seq_length = 16 input_ids = inputs["input_ids"] predicted_ids = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (2, seq_length)) self.assertListEqual(list(input_ids[0]), predicted_ids[0]) self.assertListEqual(list(input_ids[1]), predicted_ids[1]) def test_processor_case2(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() query_input = self.prepare_image_inputs() inputs = processor(images=image_input, query_images=query_input) self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/owlvit/test_processor_owlvit.py/0
{ "file_path": "transformers/tests/models/owlvit/test_processor_owlvit.py", "repo_id": "transformers", "token_count": 4354 }
406
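The OwlViT processor tests above show how text queries and images are combined into a single batch. A minimal usage sketch along the same lines follows; the checkpoint name comes from the tests, and the random image is only a stand-in for a real photo.

import numpy as np
from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")

# A random RGB image stands in for a real photo.
image = Image.fromarray(np.random.randint(255, size=(480, 640, 3), dtype=np.uint8))
inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")

# Text queries are tokenized to fixed length 16 and flattened to
# (batch_size * num_max_text_queries, 16), as the tests above check.
print(list(inputs.keys()))        # ['input_ids', 'attention_mask', 'pixel_values']
print(inputs["input_ids"].shape)  # (2, 16)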
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Persimmon model. """ import gc import unittest from parameterized import parameterized from transformers import PersimmonConfig, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonModel, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester with Llama->Persimmon class PersimmonModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, 
token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return PersimmonConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = PersimmonModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = PersimmonModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = PersimmonForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = PersimmonForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] 
# select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class PersimmonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (PersimmonModel, PersimmonForCausalLM, PersimmonForSequenceClassification) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": PersimmonModel, "text-classification": PersimmonForSequenceClassification, # TODO (ydshieh): check why these two fail. Fix them or skip them in a better way. # "text-generation": PersimmonForCausalLM, # "zero-shot": PersimmonForSequenceClassification, } if is_torch_available() else {} ) all_generative_model_classes = (PersimmonForCausalLM,) if is_torch_available() else () test_headmasking = False test_pruning = False # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Persimmon def setUp(self): self.model_tester = PersimmonModelTester(self) self.config_tester = ConfigTester(self, config_class=PersimmonConfig, hidden_size=37) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_single_label(self): config, input_dict = 
self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Persimmon buffers include complex numbers, which breaks this test") # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",)]) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_rope_scaling with Llama->Persimmon def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = PersimmonModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = PersimmonModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class PersimmonIntegrationTest(unittest.TestCase): @slow def test_model_8b_chat_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) out = model(torch.tensor([input_ids], device=torch_device)).logits EXPECTED_MEAN = torch.tensor( [[-11.4726, -11.1495, -11.2694, -11.2223, -10.9452, -11.0663, -11.0031, -11.1028]] ) # change dtype to `torch.float32` before calling `mean` to avoid `nan` values torch.testing.assert_close(out.cpu().to(torch.float32).mean(-1), EXPECTED_MEAN, atol=1e-4, rtol=1e-4) # fmt: off EXPECTED_SLICE = torch.tensor( [-16.9062, -16.9062, -16.9062, -16.9062, -16.8906, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062], dtype=torch.float16 ) # fmt: on torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) backend_empty_cache(torch_device) del model gc.collect() @slow @require_torch_accelerator @require_torch_fp16 def test_model_8b_chat_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """human: Simply put, the theory of relativity states that?\n\nadept: The theory of relativity states that the laws of physics are the same for all observers, regardless of their relative motion.""" prompt = "human: Simply put, the theory of relativity states that?\n\nadept:" tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-chat", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(torch_device) model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) backend_empty_cache(torch_device) del model gc.collect()
transformers/tests/models/persimmon/test_modeling_persimmon.py/0
{ "file_path": "transformers/tests/models/persimmon/test_modeling_persimmon.py", "repo_id": "transformers", "token_count": 8403 }
407
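The Persimmon integration tests above drive greedy generation against the 8B chat checkpoint. A stripped-down version of the same flow is sketched below for reference; it assumes accelerate is installed for device_map="auto" and that a GPU with enough memory is available (the tests instead load the model in 8-bit).

import torch

from transformers import AutoTokenizer, PersimmonForCausalLM

checkpoint = "adept/persimmon-8b-chat"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)
model = PersimmonForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.float16)

prompt = "human: Simply put, the theory of relativity states that?\n\nadept:"
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

# Greedy decoding, mirroring test_model_8b_chat_greedy_generation.
generated_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))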
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import ( check_json_file_has_correct_format, require_essentia, require_librosa, require_scipy, require_tf, require_torch, ) from transformers.utils.import_utils import ( is_essentia_available, is_librosa_available, is_scipy_available, is_torch_available, ) from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin requirements_available = ( is_torch_available() and is_essentia_available() and is_scipy_available() and is_librosa_available() ) if requirements_available: import torch from transformers import Pop2PianoFeatureExtractor class Pop2PianoFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, n_bars=2, sample_rate=22050, use_mel=True, padding_value=0, vocab_size_special=4, vocab_size_note=128, vocab_size_velocity=2, vocab_size_time=100, ): self.parent = parent self.n_bars = n_bars self.sample_rate = sample_rate self.use_mel = use_mel self.padding_value = padding_value self.vocab_size_special = vocab_size_special self.vocab_size_note = vocab_size_note self.vocab_size_velocity = vocab_size_velocity self.vocab_size_time = vocab_size_time def prepare_feat_extract_dict(self): return { "n_bars": self.n_bars, "sample_rate": self.sample_rate, "use_mel": self.use_mel, "padding_value": self.padding_value, "vocab_size_special": self.vocab_size_special, "vocab_size_note": self.vocab_size_note, "vocab_size_velocity": self.vocab_size_velocity, "vocab_size_time": self.vocab_size_time, } @require_torch @require_essentia @require_librosa @require_scipy class Pop2PianoFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Pop2PianoFeatureExtractor if requirements_available else None def setUp(self): self.feat_extract_tester = Pop2PianoFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.use_mel mel_2 = feat_extract_second.use_mel self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = 
feat_extract_second.to_dict() mel_1 = feat_extract_first.use_mel mel_2 = feat_extract_second.use_mel self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input = np.zeros([1000000], dtype=np.float32) input_features = feature_extractor(speech_input, sampling_rate=16_000, return_tensors="np") self.assertTrue(input_features.input_features.ndim == 3) self.assertEqual(input_features.input_features.shape[-1], 512) self.assertTrue(input_features.beatsteps.ndim == 2) self.assertTrue(input_features.extrapolated_beatstep.ndim == 2) def test_integration(self): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select([0])["audio"] input_speech = [x["array"] for x in speech_samples][0] sampling_rate = [x["sampling_rate"] for x in speech_samples][0] feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") input_features = feature_extractor( input_speech, sampling_rate=sampling_rate, return_tensors="pt" ).input_features EXPECTED_INPUT_FEATURES = torch.tensor( [[-7.1493, -6.8701, -4.3214], [-5.9473, -5.7548, -3.8438], [-6.1324, -5.9018, -4.3778]] ) self.assertTrue(torch.allclose(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, atol=1e-4)) def test_attention_mask(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2], sampling_rate=[44_100, 16_000], return_tensors="np", return_attention_mask=True, ) self.assertTrue(hasattr(input_features, "attention_mask")) # check shapes self.assertTrue(input_features["attention_mask"].ndim == 2) self.assertEqual(input_features["attention_mask_beatsteps"].shape[0], 2) self.assertEqual(input_features["attention_mask_extrapolated_beatstep"].shape[0], 2) # check that there are no values other than 0 and 1 self.assertTrue(np.max(input_features["attention_mask"]) == 1) self.assertTrue(np.max(input_features["attention_mask_beatsteps"]) == 1) self.assertTrue(np.max(input_features["attention_mask_extrapolated_beatstep"]) == 1) self.assertTrue(np.min(input_features["attention_mask"]) == 0) self.assertTrue(np.min(input_features["attention_mask_beatsteps"]) == 0) self.assertTrue(np.min(input_features["attention_mask_extrapolated_beatstep"]) == 0) def test_batch_feature(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_attention_mask=True, ) self.assertEqual(len(input_features["input_features"].shape), 3) # check shape self.assertEqual(input_features["beatsteps"].shape[0], 3) self.assertEqual(input_features["extrapolated_beatstep"].shape[0], 3) def test_batch_feature_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000],
dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="np", return_attention_mask=True, ) # check np array or not self.assertEqual(type(input_features["input_features"]), np.ndarray) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) def test_batch_feature_pt(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="pt", return_attention_mask=True, ) # check pt tensor or not self.assertEqual(type(input_features["input_features"]), torch.Tensor) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) @require_tf def test_batch_feature_tf(self): import tensorflow as tf feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="tf", return_attention_mask=True, ) # check tf tensor or not self.assertTrue(tf.is_tensor(input_features["input_features"])) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) @unittest.skip( "Pop2PianoFeatureExtractor does not support padding externally (while processing audio in batches, padding is automatically applied to max_length)" ) def test_padding_accepts_tensors_pt(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not support padding externally (while processing audio in batches, padding is automatically applied to max_length)" ) def test_padding_accepts_tensors_tf(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not support padding externally (while processing audio in batches, padding is automatically applied to max_length)" ) def test_padding_from_list(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not support padding externally (while processing audio in batches, padding is automatically applied to max_length)" ) def test_padding_from_array(self): pass @unittest.skip("Pop2PianoFeatureExtractor does not support truncation") def test_attention_mask_with_truncation(self): pass @unittest.skip("Pop2PianoFeatureExtractor does not support truncation") def test_truncation_from_array(self): pass @unittest.skip("Pop2PianoFeatureExtractor does not support truncation") def test_truncation_from_list(self): pass
transformers/tests/models/pop2piano/test_feature_extraction_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_feature_extraction_pop2piano.py", "repo_id": "transformers", "token_count": 4962 }
408
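The Pop2Piano feature-extraction tests above call the extractor on raw audio arrays. The sketch below mirrors that call against the pretrained checkpoint used in the integration test; it assumes the optional dependencies (essentia, librosa, scipy, torch) are installed, and silence stands in for a real recording.

import numpy as np

from transformers import Pop2PianoFeatureExtractor

feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")

# Roughly a minute of silence at 16 kHz, like the synthetic inputs in the tests.
audio = np.zeros(1_000_000, dtype=np.float32)
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="pt")

# The tests above check a 3-D input_features tensor plus 2-D beat information.
print(features.input_features.shape)
print(features.beatsteps.ndim, features.extrapolated_beatstep.ndim)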
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow RegNet model. """ from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFRegNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = TFRegNetModel(config=config) result = model(pixel_values, training=False) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFRegNetForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also 
overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = TFRegNetModelTester(self) self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False) def create_and_test_config_common_properties(self): return @unittest.skip(reason="RegNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Since RegNet does not have any attention we need to rewrite this test. 
def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFRegNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class RegNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs, training=False) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.4180, -1.5051, -3.4836]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
transformers/tests/models/regnet/test_modeling_tf_regnet.py/0
{ "file_path": "transformers/tests/models/regnet/test_modeling_tf_regnet.py", "repo_id": "transformers", "token_count": 4911 }
409
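The TensorFlow RegNet tests above finish with an image-classification integration check. A compact version of that inference path is sketched below; it reuses the first checkpoint in the pretrained archive list and the COCO fixture image, and assumes TensorFlow and Pillow are installed.

import tensorflow as tf
from PIL import Image

from transformers import (
    TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
    AutoImageProcessor,
    TFRegNetForImageClassification,
)

checkpoint = TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = TFRegNetForImageClassification.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")

outputs = model(**inputs, training=False)
print(outputs.logits.shape)  # (1, 1000), as the integration test verifies
print(int(tf.math.argmax(outputs.logits, axis=-1)[0]))  # predicted ImageNet class id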
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import RobertaPreLayerNormConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, ) from transformers.models.roberta_prelayernorm.modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormEmbeddings, create_position_ids_from_input_ids, ) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTester with Roberta->RobertaPreLayerNorm class RobertaPreLayerNormModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) 
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return RobertaPreLayerNormConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaPreLayerNormModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RobertaPreLayerNormModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = RobertaPreLayerNormForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, 
encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RobertaPreLayerNormForCausalLM(config=config).to(torch_device).eval() # make sure that ids don't start with pad token mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) # make sure that ids don't start with pad token mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaPreLayerNormForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RobertaPreLayerNormForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RobertaPreLayerNormForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, 
token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaPreLayerNormForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RobertaPreLayerNormModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormModel, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (RobertaPreLayerNormForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": RobertaPreLayerNormModel, "fill-mask": RobertaPreLayerNormForMaskedLM, "question-answering": RobertaPreLayerNormForQuestionAnswering, "text-classification": RobertaPreLayerNormForSequenceClassification, "text-generation": RobertaPreLayerNormForCausalLM, "token-classification": RobertaPreLayerNormForTokenClassification, "zero-shot": RobertaPreLayerNormForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False model_split_percents = [0.5, 0.8, 0.9] # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.setUp with Roberta->RobertaPreLayerNorm def setUp(self): self.model_tester = RobertaPreLayerNormModelTester(self) self.config_tester = ConfigTester(self, config_class=RobertaPreLayerNormConfig, hidden_size=37) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model_as_decoder def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) # Copied from 
tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model_as_decoder_with_default_input_mask def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_for_causal_lm def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_decoder_model_past_with_large_inputs def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_for_masked_lm def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_for_token_classification def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_for_multiple_choice def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_for_question_answering def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model_from_pretrained with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm def test_model_from_pretrained(self): for model_name in ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = RobertaPreLayerNormModel.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_create_position_ids_respects_padding_index with Roberta->RobertaPreLayerNorm def test_create_position_ids_respects_padding_index(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. 
Therefore, the first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = RobertaPreLayerNormEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_create_position_ids_from_inputs_embeds with Roberta->RobertaPreLayerNorm def test_create_position_ids_from_inputs_embeds(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = RobertaPreLayerNormEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) @require_torch class RobertaPreLayerNormModelIntegrationTest(TestCasePlus): @slow def test_inference_masked_lm(self): model = RobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 50265)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. EXPECTED_SLICE = torch.tensor( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4)) @slow def test_inference_no_head(self): model = RobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] # compare the actual values for a slice. EXPECTED_SLICE = torch.tensor( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
transformers/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py/0
{ "file_path": "transformers/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py", "repo_id": "transformers", "token_count": 10713 }
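Note: the two integration tests above pin hard-coded 3x3 slices of the model output for andreasmadsen/efficient_mlm_m0.40. The following is a minimal, illustrative sketch of how such reference slices are typically regenerated; the checkpoint name and input ids come from the tests themselves, while the regeneration script is an assumption and not part of the test suite.

import torch

from transformers import RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormModel

# Same token ids as used in RobertaPreLayerNormModelIntegrationTest.
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])

for cls in (RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormModel):
    model = cls.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
    model.eval()
    with torch.no_grad():
        output = model(input_ids)[0]
    # Print the 3x3 slice that the corresponding test pins as EXPECTED_SLICE.
    print(cls.__name__, output[:, :3, :3])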
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Speech2Text model. """ import unittest from transformers import Speech2Text2Config from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.speech_to_text_2.modeling_speech_to_text_2 import ( Speech2Text2Decoder, Speech2Text2ForCausalLM, ) @require_torch class Speech2Text2StandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = Speech2Text2Config( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): 
config.use_cache = True model = Speech2Text2Decoder(config=config).to(torch_device).eval() input_ids = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1 # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) print(next_input_ids) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Speech2Text2StandaloneDecoderModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ): all_model_classes = (Speech2Text2Decoder, Speech2Text2ForCausalLM) if is_torch_available() else () all_generative_model_classes = (Speech2Text2ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": Speech2Text2ForCausalLM} if is_torch_available() else {} fx_compatible = True test_pruning = False def setUp( self, ): self.model_tester = Speech2Text2StandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=Speech2Text2Config) # not implemented currently def test_inputs_embeds(self): pass # speech2text2 has no base model def test_save_load_fast_init_from_base(self): pass # speech2text2 has no base model def test_save_load_fast_init_to_base(self): pass def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) # decoder cannot keep gradients def test_retain_grad_hidden_states_attentions(self): return
transformers/tests/models/speech_to_text_2/test_modeling_speech_to_text_2.py/0
{ "file_path": "transformers/tests/models/speech_to_text_2/test_modeling_speech_to_text_2.py", "repo_id": "transformers", "token_count": 3324 }
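Note: create_and_check_decoder_model_past above verifies that decoding a single new token with past_key_values reproduces the last hidden state of a full forward pass over the concatenated sequence. The following standalone sketch illustrates that equivalence check; the tiny config values mirror the tester defaults above, and the snippet is an illustrative assumption rather than part of the test suite.

import torch

from transformers import Speech2Text2Config
from transformers.models.speech_to_text_2.modeling_speech_to_text_2 import Speech2Text2Decoder

config = Speech2Text2Config(
    vocab_size=99,
    d_model=16,
    decoder_layers=2,
    decoder_ffn_dim=32,
    decoder_attention_heads=4,
    max_position_embeddings=30,
    pad_token_id=0,
    use_cache=True,
)
decoder = Speech2Text2Decoder(config).eval()

# Avoid the pad token so positional embeddings are identical in both passes.
input_ids = torch.randint(1, config.vocab_size, (2, 7))
next_token = torch.randint(1, config.vocab_size, (2, 1))

with torch.no_grad():
    # First pass caches the key/value states of the prefix.
    past_key_values = decoder(input_ids, use_cache=True).past_key_values
    # Second pass feeds only the new token together with the cache.
    cached = decoder(next_token, past_key_values=past_key_values).last_hidden_state
    # Reference: one uncached pass over the full sequence.
    full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state

# The single cached step should match the last position of the full pass.
assert torch.allclose(cached[:, 0], full[:, -1], atol=1e-3)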
# coding=utf-8 # Copyright 2021 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import is_flax_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_sentencepiece, require_tokenizers, slow, ) from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp import optax from flax.core.frozen_dict import unfreeze from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import FLAX_MODEL_MAPPING, ByT5Tokenizer, T5Config, T5Tokenizer from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.t5.modeling_flax_t5 import ( FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model, shift_tokens_right, ) class FlaxT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = 
ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): model = FlaxT5Model(config=config) result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)) def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(input_ids) # prevent fully zero'd out attention mask decoder_attention_mask = jnp.ones_like(decoder_attention_mask) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_flax class FlaxT5ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5Model, FlaxT5ForConditionalGeneration) if is_flax_available() else () all_generative_model_classes = (FlaxT5ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True def setUp(self): self.model_tester = FlaxT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def 
test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_use_cache_forward_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, *config_and_inputs) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_shift_right(self): decoder_start_token_id = 0 pad_token_id = 1 labels = np.arange(2, 102).reshape(5, 20) labels[:2, 15:] = -100 decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id) np_decoder_input_ids = np.array(decoder_input_ids) padded_slice = np_decoder_input_ids[:2, (15 + 1) :] self.assertTrue((padded_slice == 1).all()) not_padded_slice = np_decoder_input_ids[2:, 1:] rolled_labels = np.roll(labels[2:], 1)[:, 1:] self.assertTrue((not_padded_slice == rolled_labels).all()) self.assertTrue((np_decoder_input_ids[:, 0] == 0).all()) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = 
flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test 
def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") class FlaxT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=False, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = FlaxT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class FlaxT5EncoderOnlyModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5EncoderModel,) if is_flax_available() else () is_encoder_decoder = False def setUp(self): self.model_tester = FlaxT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used 
@is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @require_sentencepiece @require_tokenizers @require_flax class FlaxT5ModelIntegrationTests(unittest.TestCase): @slow def test_small_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from 
t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_generation(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") model.config.max_length = 8 
model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_generation_bfloat16(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=jnp.bfloat16) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_summarization(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-base") tok = T5Tokenizer.from_pretrained("google-t5/t5-base") FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." 
Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. 
Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. 
It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. 
Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says . all 150 on board were killed in the crash .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . he says the new framework would reduce Iran's low-enriched uranium stockpile and cut" " centrifuges . miller: if it had been, there would have been no Iranian team at the table .", "prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] dct = tok( ["summarize: " + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, return_tensors="np", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, do_sample=False, early_stopping=True, ).sequences decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( expected_summaries, decoded, )
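# A minimal standalone sketch of the beam-search summarization flow exercised above.
# It is an illustration rather than part of the test suite: the "t5-small" checkpoint
# name and the 512-token truncation are assumptions, while the beam-search settings
# mirror the values passed to `model.generate` in the test.
from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration


def summarize_sketch(article: str) -> str:
    tok = AutoTokenizer.from_pretrained("t5-small")
    model = FlaxT5ForConditionalGeneration.from_pretrained("t5-small")
    inputs = tok("summarize: " + article, return_tensors="np", truncation=True, max_length=512)
    # Flax generation returns an output object whose `.sequences` field holds the token ids,
    # matching the `.sequences` access in the test above.
    summary_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        num_beams=4,
        length_penalty=2.0,
        max_length=142,
        min_length=56,
        early_stopping=True,
    ).sequences
    return tok.batch_decode(summary_ids, skip_special_tokens=True)[0]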
transformers/tests/models/t5/test_modeling_flax_t5.py/0
{ "file_path": "transformers/tests/models/t5/test_modeling_flax_t5.py", "repo_id": "transformers", "token_count": 23924 }
412
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VipLlava model. """ import copy import gc import unittest import requests from transformers import ( AutoProcessor, VipLlavaConfig, VipLlavaForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch else: is_torch_greater_or_equal_than_2_0 = False if is_vision_available(): from PIL import Image # Copied from transformers.tests.models.llava.test_modeling_llava.LlavaVisionText2TextModelTester with Llava->VipLlava class VipLlavaVisionText2TextModelTester: # Ignore copy def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=7, vision_feature_layers=[0, 0, 1, 1, 0], text_config={ "model_type": "llama", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 0, }, is_training=True, vision_config={ "batch_size": 12, "image_size": 30, "patch_size": 2, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_layers = vision_feature_layers self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 3 self.image_size = 336 self.encoder_seq_length = 231 def get_config(self): return VipLlavaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, vision_feature_layers=self.vision_feature_layers, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, 
pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) # we are giving 3 images let's make sure we pass in 3 image tokens input_ids[:, 1] = config.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch # Copied from transformers.tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest with Llava->VipLlava class VipLlavaForConditionalGenerationModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for `VipLlavaForConditionalGeneration`. """ all_model_classes = (VipLlavaForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = VipLlavaVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=VipLlavaConfig, has_text_modality=False) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # Copied from tests.test_modeling_common.ModelTesterMixin.test_resize_tokens_embeddings with config.vocab_size->config.text_config.vocab_size def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass
successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size) self.assertTrue(model.config.text_config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # Copied from tests.test_modeling_common.ModelTesterMixin.test_resize_embeddings_untied with config.vocab_size->config.text_config.vocab_size def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Copied from tests.test_modeling_common.ModelTesterMixin.test_tie_model_weights with config.vocab_size->config.text_config.vocab_size def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. 
model_tied.resize_token_embeddings(config.text_config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) @require_torch class VipLlavaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf") def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow @require_bitsandbytes def test_small_model_integration_test(self): model_id = "llava-hf/vip-llava-7b-hf" model = VipLlavaForConditionalGeneration.from_pretrained(model_id, load_in_4bit=True) processor = AutoProcessor.from_pretrained(model_id) url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png" image = Image.open(requests.get(url, stream=True).raw) prompt = "USER: <image>\nCan you please describe this image?\nASSISTANT:" inputs = processor(prompt, image, return_tensors="pt").to(torch_device, torch.float16) outputs = model.generate(**inputs, max_new_tokens=10) EXPECTED_OUTPUT = "USER: <image> \nCan you please describe this image?\nASSISTANT: The image features a brown and white cat sitting on" self.assertEqual(processor.decode(outputs[0], skip_special_tokens=True), EXPECTED_OUTPUT) @slow @require_torch_gpu def test_vipllava_merge_inputs_error_bug(self): # This is a reproducer of https://github.com/huggingface/transformers/pull/28333 and makes sure it does not happen anymore model_id = "llava-hf/vip-llava-7b-hf" model = VipLlavaForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True ).to(torch_device) # Simulate some user inputs pixel_values = torch.randn( (2, 3, 336, 336), dtype=torch.float, device=torch_device, ) input_ids = torch.tensor( [ [32001, 32001, 1, 15043, 7084, 32000, 29871, 13, 7900], [1, 15043, 7084, 29901, 29871, 32000, 29871, 13, 7900], ], dtype=torch.long, device=torch_device, ) attention_mask = torch.tensor( [[0, 0, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.long, device=torch_device, ) # Make sure that the loss is properly computed loss = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, labels=input_ids, ).loss loss.backward()
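# A condensed, illustrative restatement of the invariant the resize tests above keep
# checking: after `resize_token_embeddings`, the input embedding matrix and the text
# sub-config must agree on the vocabulary size. The helper is a sketch rather than a
# test used by this suite, and it assumes a vision-language model (such as VipLlava)
# whose vocabulary size lives under `config.text_config`.
def check_resize_invariant_sketch(model, new_vocab_size: int) -> None:
    embeddings = model.resize_token_embeddings(new_vocab_size)
    # The returned module is the resized input embedding; its first dimension is the vocab size.
    assert embeddings.weight.shape[0] == new_vocab_size
    # Composite vision-language models keep the vocab size on the text sub-config.
    assert model.config.text_config.vocab_size == new_vocab_size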
transformers/tests/models/vipllava/test_modeling_vipllava.py/0
{ "file_path": "transformers/tests/models/vipllava/test_modeling_vipllava.py", "repo_id": "transformers", "token_count": 7943 }
413
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViT model. """ import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def 
create_and_check_model(self, config, pixel_values, labels): model = ViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = ViTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = ViTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = ViTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device) image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 3601, 384)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. """ model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
transformers/tests/models/vit/test_modeling_vit.py/0
{ "file_path": "transformers/tests/models/vit/test_modeling_vit.py", "repo_id": "transformers", "token_count": 4981 }
414
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Wav2Vec2Phoneme tokenizer.""" import json import os import unittest from typing import Tuple from transformers import Wav2Vec2PhonemeCTCTokenizer from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/wav2vec2-lv-60-espeak-cv-ft" tokenizer_class = Wav2Vec2PhonemeCTCTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") # overwrite since phonemes require specific creation def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def test_tokenizer_add_new_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") # check adding a single token tokenizer.add_tokens("xxx") token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 392, 17]) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"]) token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids self.assertEqual(token_ids, [3, 200]) # mai should be <unk> (=3) def test_phonemize(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː") def test_encode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_encode_decode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids) self.assertEqual(phonemes, phonemes_enc_dec) def test_decode(self): tokenizer = 
self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) def test_phonemize_with_word_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |") def test_encode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) # decode with no word_del_token filter tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False) batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"]) def test_encode_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False) self.assertEqual(phonemes, phonemes_enc_dec) def test_encode_decode_with_del_filter(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec) def test_change_phonemizer_lang(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None ) input_text = "Hello how are you" input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids self.assertNotEqual(input_ids_en, input_ids_fr) text_en = tokenizer.decode(input_ids_en) text_fr = tokenizer.decode(input_ids_fr) self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː") self.assertEqual(text_fr, "ɛ l o h aʊ a 
ʁ j u") def test_case_insensitive(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text_up = "Hello how Are you" input_text_low = "hello how are you" input_ids_up = tokenizer(input_text_up).input_ids input_ids_low = tokenizer(input_text_low).input_ids self.assertEqual(input_ids_up, input_ids_low) def test_tokenizer_decode_added_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"]) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") tokenizer.add_tokens("|") # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys()), 2) self.assertTrue("text" in outputs) self.assertTrue("char_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput)) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def test_offsets_batch(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") def check_list_tuples_equal(outputs_batch, outputs_list): self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput)) self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput)) # transform list to ModelOutput outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"]) def recursive_check(list_or_dict_1, list_or_dict_2): if isinstance(list_or_dict_1, list): [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)] self.assertEqual(list_or_dict_1, list_or_dict_2) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"]) # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, 
tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True) outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_char_batch, outputs_char) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes") def test_added_tokens_do_lower_case(self): pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes") def test_encode_decode_with_spaces(self): pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency") def test_internal_consistency(self): pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing") def test_pretrained_model_lists(self): pass # overwrite common def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_tf_encode_plus_sent_to_model(self): pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_torch_encode_plus_sent_to_model(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. 
tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] output = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(output["text"], str)
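# The decode expectations above follow the usual CTC collapse: consecutive duplicate ids
# are merged first and padding ids are dropped afterwards, which is why
# [11, 5, 15, <pad>, 15, 8, 98] still decodes to two "ɾ" tokens. Below is a
# framework-free sketch of that collapse; the pad id of 1 matches the fixture vocab
# written in `setUp` and is otherwise an assumption.
from itertools import groupby


def ctc_collapse_sketch(ids, pad_token_id=1):
    deduped = [key for key, _ in groupby(ids)]  # merge consecutive repeats
    return [i for i in deduped if i != pad_token_id]  # then drop padding ids


# The pad id keeps the two runs of 15 apart, so both survive the merge.
assert ctc_collapse_sketch([11, 5, 5, 5, 15, 15, 1, 15, 15]) == [11, 5, 15, 15]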
transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py/0
{ "file_path": "transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py", "repo_id": "transformers", "token_count": 9993 }
415
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class TFXGLMModelTester: config_cls = XGLMConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs(self): input_ids = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config(self): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { 
"input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) test_onnx = False test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = TFXGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() @slow def test_model_from_pretrained(self): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") def test_resize_token_embeddings(self): super().test_resize_token_embeddings() @require_tf class TFXGLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xglm(self, verify_outputs=True): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tf.random.set_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="tf") input_ids = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0"): output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0]) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR) @slow def test_batch_generation(self): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
transformers/tests/models/xglm/test_modeling_tf_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_modeling_tf_xglm.py", "repo_id": "transformers", "token_count": 4033 }
416
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import XLMRobertaXLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, ) from transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl import ( XLMRobertaXLEmbeddings, create_position_ids_from_input_ids, ) class XLMRobertaXLModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels def get_config(self): return XLMRobertaXLConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = XLMRobertaXLModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = XLMRobertaXLForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = XLMRobertaXLForCausalLM(config=config).to(torch_device).eval() # make sure that ids don't start with pad token mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask # first forward pass outputs = model( input_ids, attention_mask=input_mask, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) # make sure that ids don't start with pad token mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = XLMRobertaXLForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = XLMRobertaXLForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( 
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XLMRobertaXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLModel, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (XLMRobertaXLForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": XLMRobertaXLModel, "fill-mask": XLMRobertaXLForMaskedLM, "question-answering": XLMRobertaXLForQuestionAnswering, "text-classification": XLMRobertaXLForSequenceClassification, "text-generation": XLMRobertaXLForCausalLM, "token-classification": XLMRobertaXLForTokenClassification, "zero-shot": XLMRobertaXLForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def setUp(self): self.model_tester = XLMRobertaXLModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_create_position_ids_respects_padding_index(self): """Ensure that the default position ids only assign a sequential position to non-padding tokens. This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = XLMRobertaXLEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): """Ensure that the default position ids only assign a sequential position to non-padding tokens. This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index.
Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = XLMRobertaXLEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) @require_torch class XLMRobertaModelXLIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_xl(self): model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 2560)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[0.0110, 0.0605, 0.0354, 0.0689, 0.0066, 0.0691, 0.0302, 0.0412, 0.0860, 0.0036, 0.0405, 0.0170]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @unittest.skip(reason="Model is too large to be tested on the CI") def test_xlm_roberta_xxl(self): model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xxl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 4096)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[0.0046, 0.0146, 0.0227, 0.0126, 0.0219, 0.0175, -0.0101, 0.0006, 0.0124, 0.0209, -0.0063, 0.0096]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
transformers/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py/0
{ "file_path": "transformers/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py", "repo_id": "transformers", "token_count": 10342 }
417
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib import tempfile import unittest from packaging import version from transformers import AqlmConfig, AutoConfig, AutoModelForCausalLM, AutoTokenizer, OPTForCausalLM, StaticCache from transformers.testing_utils import ( require_accelerate, require_aqlm, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_aqlm_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AqlmConfigTest(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AqlmConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = { "in_group_size": 32, "num_codebooks": 8, "nbits_per_codebook": 8, "linear_weights_not_to_quantize": ["lm_head.weight"], } quantization_config = AqlmConfig.from_dict(dict) self.assertEqual(dict["in_group_size"], quantization_config.in_group_size) self.assertEqual(dict["num_codebooks"], quantization_config.num_codebooks) self.assertEqual(dict["nbits_per_codebook"], quantization_config.nbits_per_codebook) self.assertEqual(dict["linear_weights_not_to_quantize"], quantization_config.linear_weights_not_to_quantize) @slow @require_torch_gpu @require_aqlm @require_accelerate class AqlmTest(unittest.TestCase): model_name = "BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf" input_text = "Hello my name is" max_new_tokens = 32 EXPECTED_OUTPUT = "Hello my name is Katie. I am a 20 year old college student. I am a very outgoing person. I love to have fun and be active. 
I" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from aqlm import QuantizedLinear from transformers.integrations import replace_with_aqlm_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AqlmConfig() with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_aqlm_linear(model, quantization_config=quantization_config) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears, nb_aqlm_linear) # Try with `linear_weights_not_to_quantize` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_aqlm_linear( model, quantization_config=quantization_config, linear_weights_not_to_quantize=["lm_head.weight"] ) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears - 1, nb_aqlm_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AqlmConfig(bits=4) with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @unittest.skipUnless( is_aqlm_available() and version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.3"), "test requires `aqlm>=1.0.3`", ) def test_quantized_model_compile(self): """ Simple test that checks 
if the quantized model is working properly """ # Sample tokens greedily def decode_one_tokens(model, cur_token, input_pos, cache_position): logits = model( cur_token, position_ids=input_pos, cache_position=cache_position, return_dict=False, use_cache=True )[0] new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) return new_token # Tokenize the test input input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"] seq_length = input_ids.shape[1] # Setup static KV cache for generation self.quantized_model._setup_cache(StaticCache, 1, max_cache_len=seq_length + self.max_new_tokens + 1) # Allocate token ids to be generated and copy prefix ids cache_position = torch.arange(seq_length, device=torch_device) generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device) generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int) # Do a forward pass to fill the prefix cache and compile the kernels if necessary logits = self.quantized_model(input_ids, cache_position=cache_position, return_dict=False, use_cache=True)[0] next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) generated_ids[:, [seq_length]] = next_token with torch.no_grad(): # Compile the CUDA graph decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True) # Generate tokens one by one cache_position = torch.tensor([seq_length + 1], device=torch_device) for _ in range(1, self.max_new_tokens): with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): next_token = decode_one_tokens(self.quantized_model, next_token.clone(), None, cache_position) generated_ids.index_copy_(1, cache_position, next_token) cache_position += 1 # Check generated text self.assertEqual(self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
transformers/tests/quantization/aqlm_integration/test_aqlm.py/0
{ "file_path": "transformers/tests/quantization/aqlm_integration/test_aqlm.py", "repo_id": "transformers", "token_count": 3914 }
418
# Testing the new Hugging Face Deep Learning Container

This document explains the testing strategy for releasing a new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly, on Mondays. The code cutoff date for any changes is the Wednesday before the release Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate of the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC tag.

```
git+https://github.com/huggingface/[email protected] # install main or adjust it with vX.X.X for installing a specific version of transformers
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably, take a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`. To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`:
```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
    datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment, describe which tests we ran and with which framework and package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see which information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably, take a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**
1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x-x-x.yml`, where `x-x-x` is the base framework version, e.g. if PyTorch 1.6.0 is the latest version in `buildspec.yaml`, the file should be renamed to `buildspec-1-6.yml`. To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`:

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.4.2
    datasets_version: &DATASETS_VERSION 1.5.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment, describe which tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see which information is needed.
## Current Tests

| ID | Description | Platform | #GPUS | Collected & evaluated metrics |
|-------------------------------------|---------------------------------------------------------------------|-----------------------------|-------|------------------------------------------|
| pytorch-transfromers-test-single | test BERT finetuning using BERT from transformers lib + PT | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-ddp | test BERT finetuning using BERT from transformers lib + PT DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-smd | test BERT finetuning using BERT from transformers lib + PT SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-1-smp | test RoBERTa finetuning using RoBERTa from transformers lib + PT SM MP | SageMaker createTrainingJob | 8 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-single | test BERT finetuning using BERT from transformers lib + TF | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-2-smd | test BERT finetuning using BERT from transformers lib + TF SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
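As a quick reference, a Test Case 1 run can be chained roughly as follows. This is only a sketch: the profile name below is a placeholder, and the RC tag still has to be edited into the two `requirements.txt` files by hand as described above.

```bash
# placeholder profile name -- use whichever profile holds the HF SageMaker credentials
export AWS_PROFILE=hf-sagemaker-tests

# optional sanity check that the credentials resolve before the ~10-15 minute run
aws sts get-caller-identity

# after pointing tests/sagemaker/scripts/pytorch/requirements.txt and
# tests/sagemaker/scripts/tensorflow/requirements.txt at the RC tag:
make test-sagemaker
```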
transformers/tests/sagemaker/README.md/0
{ "file_path": "transformers/tests/sagemaker/README.md", "repo_id": "transformers", "token_count": 3293 }
419
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class FeatureExtractionSavingTestMixin: test_cast_dtype = None def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) obj = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key], value) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_init_without_params(self): feat_extract = self.feature_extraction_class() self.assertIsNotNone(feat_extract)
transformers/tests/test_feature_extraction_common.py/0
{ "file_path": "transformers/tests/test_feature_extraction_common.py", "repo_id": "transformers", "token_count": 828 }
420
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from transformers.tools.agent_types import AGENT_TYPE_MAPPING from .test_tools_common import ToolTesterMixin, output_types class TranslationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("translation") self.tool.setup() self.remote_tool = load_tool("translation", remote=True) def test_exact_match_arg(self): result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_arg_remote(self): result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg(self): result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_call(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) # There is a single output if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_agent_types_outputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
transformers/tests/tools/test_translation.py/0
{ "file_path": "transformers/tests/tools/test_translation.py", "repo_id": "transformers", "token_count": 1249 }
421
import unittest import warnings from dataclasses import dataclass from transformers.convert_slow_tokenizer import SpmConverter from transformers.testing_utils import get_tests_dir @dataclass class FakeOriginalTokenizer: vocab_file: str class ConvertSlowTokenizerTest(unittest.TestCase): def test_spm_converter_bytefallback_warning(self): spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model") spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback) with warnings.catch_warnings(record=True) as w: _ = SpmConverter(original_tokenizer_without_bytefallback) self.assertEqual(len(w), 0) original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback) with warnings.catch_warnings(record=True) as w: _ = SpmConverter(original_tokenizer_with_bytefallback) self.assertEqual(len(w), 1) self.assertIn( "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" " which is not implemented in the fast tokenizers.", str(w[0].message), )
transformers/tests/utils/test_convert_slow_tokenizer.py/0
{ "file_path": "transformers/tests/utils/test_convert_slow_tokenizer.py", "repo_id": "transformers", "token_count": 524 }
422
{ "ASTForAudioClassification": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTForAudioClassification" ], "sha": "83d6e076db7768a3645401bad3204624985e1d08" }, "ASTModel": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTModel" ], "sha": "75e68f956f6f2c0709b01e596e7a6aecb1b29dce" }, "AlbertForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMaskedLM", "TFAlbertForMaskedLM" ], "sha": "d29de71ac29e1019c3a7762f7357f750730cb037" }, "AlbertForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMultipleChoice", "TFAlbertForMultipleChoice" ], "sha": "242aecce6a589a2964c0f695621fa22a83751579" }, "AlbertForPreTraining": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForPreTraining", "TFAlbertForPreTraining" ], "sha": "41330be4b271687f4d88ddc96346c12aa11de983" }, "AlbertForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForQuestionAnswering", "TFAlbertForQuestionAnswering" ], "sha": "040b81c15f437f4722349dc5b41fccd17ebd7fdc" }, "AlbertForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForSequenceClassification", "TFAlbertForSequenceClassification" ], "sha": "39c1a0e2c1c2623106d3211d751e9b32f23a91a0" }, "AlbertForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForTokenClassification", "TFAlbertForTokenClassification" ], "sha": "359c3f4a311a4053a6f6d6a880db5f82c8e3ff1f" }, "AlbertModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertModel", "TFAlbertModel" ], "sha": "34a63314686b64aaeb595ddb95006f1ff2ffda17" }, "AlignModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "AlignModel" ], "sha": "68a4f9d3f493f44efa7c1dde6fcca23350e2c92b" }, "AltCLIPModel": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "AltCLIPModel" ], "sha": "3106af0fd503970717c05f27218e5cacf19ba872" }, "BarkModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BarkModel" ], "sha": "187e590fd87359cea47693e8cb11a604cd7b673c" }, "BartForCausalLM": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForCausalLM" ], "sha": "c25526ac67d2dbe79fe5462af4b7908ca2fbc3ff" }, "BartForConditionalGeneration": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForConditionalGeneration", "TFBartForConditionalGeneration" ], "sha": "3a489a21e4b04705f4a6047924b7616a67be7e37" }, "BartForQuestionAnswering": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForQuestionAnswering" ], "sha": "3ebf9aab39a57ceab55128d5fc6f61e4db0dadd4" }, "BartForSequenceClassification": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "BartForSequenceClassification", "TFBartForSequenceClassification" ], "sha": "ea452fd9a928cfebd71723afa50feb20326917bc" }, "BartModel": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartModel", "TFBartModel" ], "sha": "e5df6d1aa75f03833b2df328b9c35463f73a421b" }, "BeitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForImageClassification" ], "sha": "e997587bb890f82faad4bd25eb23d85ba21ecaaa" }, "BeitForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForSemanticSegmentation" ], "sha": "d4afa9e21e3fe5b087578ed68974d9b3ffc1fb22" }, "BeitModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitModel" ], "sha": "5c4a051f0cca6f64d02c6168deb88413cae10d2c" }, "BertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMaskedLM", "TFBertForMaskedLM" ], "sha": "3e32baa52ce044c75edfb5c28abd51ee8d051282" }, "BertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMultipleChoice", "TFBertForMultipleChoice" ], "sha": "0b8c3a6d411d1e19e5fd98d4d8631ae7616eeeaa" }, "BertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForNextSentencePrediction", "TFBertForNextSentencePrediction" ], "sha": "628e70debf8864bd0b63aff7901d17d9c4f7612c" }, "BertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForPreTraining", "TFBertForPreTraining" ], "sha": "c748ad37e6a200a6f64b2764191bfe13f976032f" }, "BertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForQuestionAnswering", "TFBertForQuestionAnswering" ], "sha": "4671ad0c21493b97c5eb2f0201192704c29876d5" }, "BertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForSequenceClassification", "TFBertForSequenceClassification" ], "sha": "37a9d44022264c12bdf3ec257778f953b63d4aaf" }, "BertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForTokenClassification", "TFBertForTokenClassification" ], "sha": "d7dc3a0793ff6dfcb794b21130ee0f185d2c61a2" }, "BertLMHeadModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertLMHeadModel", "TFBertLMHeadModel" ], "sha": "b4e3acc1990f3e365ffddbd54b620a26d9fb4b09" }, "BertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertModel", "TFBertModel" ], "sha": "3956d303d3cddf0708ff20660c1ea5f6ec30e434" }, "BigBirdForCausalLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForCausalLM" ], "sha": "5c7a487af5248d9c01b45d5481b7d7bb9b36e1b5" }, "BigBirdForMaskedLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMaskedLM" ], "sha": "476ef8225c0f69270b577706ad4f1dda13e4dde5" }, "BigBirdForMultipleChoice": 
{ "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMultipleChoice" ], "sha": "cf93eaa1019987112c171a407745bc183a20513a" }, "BigBirdForPreTraining": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForPreTraining" ], "sha": "5fb9efa13334431e7c186a9fa314b89c4a1eee72" }, "BigBirdForQuestionAnswering": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForQuestionAnswering" ], "sha": "f82f88bd71fba819a8ffb0692915d3529e705417" }, "BigBirdForSequenceClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForSequenceClassification" ], "sha": "ea398090858f9af93b54fc9a8d65cfed78ac27ff" }, "BigBirdForTokenClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForTokenClassification" ], "sha": "2cdea118999fa58ba9fb0162d99e2ffa146c3df1" }, "BigBirdModel": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdModel" ], "sha": "9c55989f31df156194e6997606fb14d9897e0300" }, "BigBirdPegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForCausalLM" ], "sha": "49bc8816c666dee32e27cd8e00136b604eb85243" }, "BigBirdPegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForConditionalGeneration" ], "sha": "e791aa6d1af5a76ca0926d95b1f28bd2d8adf376" }, "BigBirdPegasusForQuestionAnswering": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForQuestionAnswering" ], "sha": "7650e076713ca707a37062adc8c9c1cd60dad7c7" }, "BigBirdPegasusForSequenceClassification": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForSequenceClassification" ], "sha": "02500e8ebd9c53528750013fb963fbdc2be34034" }, "BigBirdPegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusModel" ], "sha": "b07c5304dfba673cf8b9cf5cd1aa45fbfea1c2f3" }, "BioGptForCausalLM": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForCausalLM" ], "sha": "07073b31da84054fd12226e3cae4cb3beb2547f9" }, "BioGptForSequenceClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForSequenceClassification" ], "sha": "8e18ad6218abd795e050dec324a8c827ccedacb4" }, "BioGptForTokenClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForTokenClassification" ], "sha": "67f8173c1a17273064d452a9031a51b67f327b6a" }, "BioGptModel": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptModel" ], "sha": "fe18551d0743538a990520b75707294ec57b4ebe" }, "BitBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitBackbone" ], "sha": "2f06f6b4395b6dce2b00ac839ff757410e743cd7" }, "BitForImageClassification": { "tokenizer_classes": [], 
"processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitForImageClassification" ], "sha": "d0d8476f2d285ddda7c42c0d4a8e4bf6f5d2bfdf" }, "BitModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitModel" ], "sha": "30a8a9b1a6b253cc500c01cf41bc1fc9581ea5e5" }, "BlenderbotForCausalLM": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForCausalLM" ], "sha": "8aad2e13e8920bca3cf988ba45f8a7b008b51a81" }, "BlenderbotForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForConditionalGeneration", "TFBlenderbotForConditionalGeneration" ], "sha": "e8532878b9924fa02fb4b059b7f6e7fa372fff91" }, "BlenderbotModel": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotModel", "TFBlenderbotModel" ], "sha": "ff848a40c30ca98eb7c6870bbb02677d5af9db55" }, "BlenderbotSmallForCausalLM": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForCausalLM" ], "sha": "4c57c106630932eb9de4d76210a540d04616304d" }, "BlenderbotSmallForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallForConditionalGeneration" ], "sha": "b8db01fcf3e37a5b369cd50e169bf383b8e905d8" }, "BlenderbotSmallModel": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallModel", "TFBlenderbotSmallModel" ], "sha": "0a10c70e225ec63278faffa8fabf759f063f0e55" }, "Blip2ForConditionalGeneration": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2ForConditionalGeneration" ], "sha": "35e1ef43da3554af62eb29a7b3dbbef3f3bef48e" }, "Blip2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2Model" ], "sha": "c23378f225be31872fff33c103cf0ebc2454ffcc" }, "BlipForConditionalGeneration": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipForConditionalGeneration", "TFBlipForConditionalGeneration" ], "sha": "eaf32bc0369349deef0c777442fc185119171d1f" }, "BlipModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipModel", "TFBlipModel" ], "sha": "3d1d1c15eff22d6b2664a2d15757fa6f5d93827d" }, "BloomForCausalLM": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForCausalLM" ], "sha": "0f4f06f162cd67d34d03ee156484e4001d468500" }, "BloomForQuestionAnswering": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForQuestionAnswering" ], "sha": "23f369f163eef8c9c9685900440b0cbb0f3439fd" }, "BloomForSequenceClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForSequenceClassification" ], "sha": "b2280eef7172835f39b265eb0c46623257f67bbe" }, "BloomForTokenClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForTokenClassification" ], "sha": 
"9796aa45f99adff987c978089e11c0bd9d7b997f" }, "BloomModel": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomModel" ], "sha": "28b600fcfdc4f4938406fb518abf895620048cb2" }, "BrosForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosForTokenClassification" ], "sha": "4ec2c91936f96b93667e8946fc7abbdeeb08a6d7" }, "BrosModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosModel" ], "sha": "e2464830b1874eeaf9f4b425fbe0ce8e7c7643e9" }, "CLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "CLIPModel", "TFCLIPModel" ], "sha": "0452d344074485d0e7eb5d5c12447b7c9dbc9619" }, "CLIPSegModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "CLIPSegModel" ], "sha": "7b1305214ccc85d29b776ffbee06748693852a04" }, "CTRLForSequenceClassification": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLForSequenceClassification", "TFCTRLForSequenceClassification" ], "sha": "280b5a3502d607c55c9f8d9f198fe9c2802d6f73" }, "CTRLLMHeadModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLLMHeadModel", "TFCTRLLMHeadModel" ], "sha": "662381663b216f1dd3c9cd30e2e83cb4c6fc9552" }, "CTRLModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLModel", "TFCTRLModel" ], "sha": "68b19b4f132d5a191a73acd78d983cbdcf068e9c" }, "CanineForMultipleChoice": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForMultipleChoice" ], "sha": "fa0451453ed202f903ff7dcf6071aab6630fb89f" }, "CanineForQuestionAnswering": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForQuestionAnswering" ], "sha": "5e1012bb086ac2e0b1497eeb7ed14eb2183d4ecb" }, "CanineForSequenceClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForSequenceClassification" ], "sha": "75336dc9179153869c38a8047ce4b1e02677a260" }, "CanineForTokenClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForTokenClassification" ], "sha": "65a622ea8e12597e12f45e59d46d8dbe8461fc10" }, "CanineModel": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineModel" ], "sha": "531ef67ad4f0b3dc7a9e5d722c774096b7401b1b" }, "ChineseCLIPModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ChineseCLIPImageProcessor" ], "model_classes": [ "ChineseCLIPModel" ], "sha": "504271a3c5fd9c2e877f5b4c01848bc18778c7c3" }, "ClapModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [ "ClapFeatureExtractor" ], "model_classes": [ "ClapModel" ], "sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9" }, "ClvpModelForConditionalGeneration": { "tokenizer_classes": [ "ClvpTokenizer" ], "processor_classes": [ "ClvpFeatureExtractor" ], "model_classes": [], "sha": "45df7581535be337ff781707b6c20994ca221f05" }, "CodeGenForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenForCausalLM" ], "sha": 
"a3fc69d757fd1f0aa01bcbc4337f586651c7cb10" }, "CodeGenModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenModel" ], "sha": "dad4941a2b7429fc6e8206fcc4a04fc40f4a0beb" }, "ConditionalDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrForObjectDetection" ], "sha": "762c213a0285edc84eb813a2ed90063cf971ca43" }, "ConditionalDetrModel": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrModel" ], "sha": "18b75874158cac520c63605293b06e0b1327c263" }, "ConvBertForMaskedLM": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMaskedLM", "TFConvBertForMaskedLM" ], "sha": "307c70e32c3d3c18aeb45e0cbdc9fcd2957d9aba" }, "ConvBertForMultipleChoice": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMultipleChoice", "TFConvBertForMultipleChoice" ], "sha": "d6561a21ffdb82d03c1822af0510eb7482ce5026" }, "ConvBertForQuestionAnswering": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForQuestionAnswering", "TFConvBertForQuestionAnswering" ], "sha": "8a056da5cc421415c2a24b9f644dd95ca279411d" }, "ConvBertForSequenceClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForSequenceClassification", "TFConvBertForSequenceClassification" ], "sha": "8bb8b20e51d282d777cc567cacadd97a35f0811e" }, "ConvBertForTokenClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForTokenClassification", "TFConvBertForTokenClassification" ], "sha": "8db0dd3c2b8ccc958fa9a84801f4f837b42fcf2c" }, "ConvBertModel": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertModel", "TFConvBertModel" ], "sha": "c9c5b1a74f0e468d8467473cabeaa67fcdbaddb7" }, "ConvNextBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextBackbone" ], "sha": "499c7d6a97825b79e19663b70f3b60c4813b6bf2" }, "ConvNextForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextForImageClassification", "TFConvNextForImageClassification" ], "sha": "0b490fd6b19cdbf721025dbd6ee45dcc5828e6e3" }, "ConvNextModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextModel", "TFConvNextModel" ], "sha": "7b3b47a57b9a9120e022b91d6067daeac55b794f" }, "ConvNextV2Backbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Backbone" ], "sha": "c82fc526949dfd892a1fee3c34be6f8d80c4d3df" }, "ConvNextV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2ForImageClassification", "TFConvNextV2ForImageClassification" ], "sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc" }, "ConvNextV2Model": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Model", "TFConvNextV2Model" ], "sha": 
"c4dd68ee1102cba05bcc483da2a88e39427b7249" }, "CvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtForImageClassification", "TFCvtForImageClassification" ], "sha": "4b1938e252fdb26a06c1f5755e07fa8f6eed2d75" }, "CvtModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtModel", "TFCvtModel" ], "sha": "27fed12c174f4f4f1fe27075d1c29602fe0669f0" }, "DPRQuestionEncoder": { "tokenizer_classes": [ "DPRQuestionEncoderTokenizer", "DPRQuestionEncoderTokenizerFast" ], "processor_classes": [], "model_classes": [ "DPRQuestionEncoder", "TFDPRQuestionEncoder" ], "sha": "09ae0269780271e0a4916f7bab1dbc4f8a76070d" }, "DPTForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForDepthEstimation" ], "sha": "11b7735d64d95b6599811631b012d2dec6eaa2c1" }, "DPTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForSemanticSegmentation" ], "sha": "e140c3c716a4bf11dad875e5f5f0abd2bd4cbbcb" }, "DPTModel": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTModel" ], "sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5" }, "Data2VecAudioForAudioFrameClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForAudioFrameClassification" ], "sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f" }, "Data2VecAudioForCTC": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForCTC" ], "sha": "bb161b6a181bd2c22cf30222f46fa6ef42225744" }, "Data2VecAudioForSequenceClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForSequenceClassification" ], "sha": "8de17e0a959eca5f72b2ea59a11bc1fa744785d9" }, "Data2VecAudioForXVector": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForXVector" ], "sha": "dcb92484cf28fb4fe1dcf5d6e8d78e04382fdce9" }, "Data2VecAudioModel": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioModel" ], "sha": "73f503fdff73b7616154f64dbe38a685cc48e8eb" }, "Data2VecTextForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForCausalLM" ], "sha": "1f3658ce623653338cd31516551e8181aa08bb38" }, "Data2VecTextForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMaskedLM" ], "sha": "fb41ac30d0faa0899bf5afaa0986df8993395ca6" }, "Data2VecTextForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMultipleChoice" ], "sha": "e7556d520ad90ebae5ad88554d45a37488d00040" }, "Data2VecTextForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForQuestionAnswering" ], "sha": "9630833d76a1fd7e96b904d87bb11b7c00ccd021" }, "Data2VecTextForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForSequenceClassification" ], "sha": 
"156e4019c37d9592f193ba80553cd245cbccecb3" }, "Data2VecTextForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForTokenClassification" ], "sha": "55b3a49fdbf22479d6eb939261d4b884ea288270" }, "Data2VecTextModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextModel" ], "sha": "c21be3e4f88e8357bf33bfba8f8e05ae2e735124" }, "Data2VecVisionForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForImageClassification", "TFData2VecVisionForImageClassification" ], "sha": "d640e7ced7a3fbbb8c8661a4f67b934e55406172" }, "Data2VecVisionForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForSemanticSegmentation", "TFData2VecVisionForSemanticSegmentation" ], "sha": "3eba3cd694fab6530b7e5da8f49d3951301c816a" }, "Data2VecVisionModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionModel", "TFData2VecVisionModel" ], "sha": "2a7ad25e4359970dc70494a2f3eb98e2a3c9806d" }, "DebertaForMaskedLM": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForMaskedLM", "TFDebertaForMaskedLM" ], "sha": "e0f9ada9e0f6d4d7cc39d7cbd58369b0c84de33d" }, "DebertaForQuestionAnswering": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForQuestionAnswering", "TFDebertaForQuestionAnswering" ], "sha": "a3eb69cdb0b52f7d0fb730e882f1a54b9a7442ea" }, "DebertaForSequenceClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForSequenceClassification", "TFDebertaForSequenceClassification" ], "sha": "32af91d12c4e9b6d62b420bee93311fd77d3c933" }, "DebertaForTokenClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForTokenClassification", "TFDebertaForTokenClassification" ], "sha": "ba62ba2726d813e60e512476fc1b178aa3858175" }, "DebertaModel": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaModel", "TFDebertaModel" ], "sha": "4273294e14cd04c0e2cd1dcff5cf7e5d4fe906ba" }, "DebertaV2ForMaskedLM": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMaskedLM", "TFDebertaV2ForMaskedLM" ], "sha": "a053dedc2cdf32918a84277cb0c05186604496a5" }, "DebertaV2ForMultipleChoice": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMultipleChoice", "TFDebertaV2ForMultipleChoice" ], "sha": "07e39f520ce239b39ef8cb24cd7874d06c791063" }, "DebertaV2ForQuestionAnswering": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForQuestionAnswering", "TFDebertaV2ForQuestionAnswering" ], "sha": "9cecb3a7fc6b95099122283644ea1f8ced287d1b" }, "DebertaV2ForSequenceClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForSequenceClassification", 
"TFDebertaV2ForSequenceClassification" ], "sha": "df9ea1f5c0f2ccd139b21cfb3963a5a5ebfb5b81" }, "DebertaV2ForTokenClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForTokenClassification", "TFDebertaV2ForTokenClassification" ], "sha": "51fe01989df38a540ac1abca5ee71a51365defd5" }, "DebertaV2Model": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2Model", "TFDebertaV2Model" ], "sha": "211df4bd1a4a9b66c97af3f9231a5d2af8de7b9f" }, "DeformableDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrForObjectDetection" ], "sha": "8fa0db215c458f60ae4d455d6fb067c1c5e39fdc" }, "DeformableDetrModel": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrModel" ], "sha": "0faac5624696b03edd14694642f9804f2cd8f3da" }, "DeiTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassification", "TFDeiTForImageClassification" ], "sha": "21fc864199dafa0130f16a45769c6b6ca22c7784" }, "DeiTForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassificationWithTeacher", "TFDeiTForImageClassificationWithTeacher" ], "sha": "5a5738a109e27f3d4b78a0db4cb1d3331140c10e" }, "DeiTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForMaskedImageModeling", "TFDeiTForMaskedImageModeling" ], "sha": "d5df5c538fe1efb8d668a3893d1691d505a0de06" }, "DeiTModel": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTModel", "TFDeiTModel" ], "sha": "0fdbff6f44b7c6933c2027fec1d7f87bec06b590" }, "DetaForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaForObjectDetection" ], "sha": "a15ad6ce64fbcb5021b2b99e9587c4011ef3341d" }, "DetaModel": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaModel" ], "sha": "8820f2297ec0dec8f1875054559c8b7a162098e3" }, "DetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForObjectDetection" ], "sha": "7dc967c53f4b3f07904c42b255346b744d0ad84e" }, "DetrForSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForSegmentation" ], "sha": "e34330acdae359588ef853e961a78d419dc4e8eb" }, "DetrModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrModel" ], "sha": "f15ce38a10c7447e8048b1681e4811322a005722" }, "DinatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatBackbone" ], "sha": "3ba13790a0796d90104c207f75bb3d5d79723d51" }, "DinatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatForImageClassification" ], "sha": "624cf2d864a7ea2f90e24014a213e34597e8bd76" }, "DinatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatModel" ], "sha": "d6c75bc51196f0a683afb12de6310fdda13efefd" }, "Dinov2Backbone": { "tokenizer_classes": [], "processor_classes": 
[ "BitImageProcessor" ], "model_classes": [ "Dinov2Backbone" ], "sha": "dbf8d2ff3092ac53c11e6525e6cbae7ace84769a" }, "Dinov2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2ForImageClassification" ], "sha": "ae44840966456aae33641df2c8c8a4af5b457b24" }, "Dinov2Model": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2Model" ], "sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959" }, "DistilBertForMaskedLM": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMaskedLM", "TFDistilBertForMaskedLM" ], "sha": "b2dfda30b012821996e6e603729562d9c900bc0f" }, "DistilBertForMultipleChoice": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMultipleChoice", "TFDistilBertForMultipleChoice" ], "sha": "ec6b83129a7d1be2a6b8d58303abcca5541a5cb3" }, "DistilBertForQuestionAnswering": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForQuestionAnswering", "TFDistilBertForQuestionAnswering" ], "sha": "812406b226415044469b0e0a84c4fe0ff338c5d3" }, "DistilBertForSequenceClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForSequenceClassification", "TFDistilBertForSequenceClassification" ], "sha": "6f427ce7b3e5aaa596938fbd98437d3875581b7b" }, "DistilBertForTokenClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForTokenClassification", "TFDistilBertForTokenClassification" ], "sha": "166dbe3f5d6ecd871762567069454d6ec65234b4" }, "DistilBertModel": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertModel", "TFDistilBertModel" ], "sha": "cc4425ad0676f3ec00e8bffe485fe83cae61041a" }, "DonutSwinModel": { "tokenizer_classes": [], "processor_classes": [ "DonutImageProcessor" ], "model_classes": [ "DonutSwinModel" ], "sha": "1b10654fbfe2f2ea410a672ab605bd5c60d3f284" }, "EfficientFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassification", "TFEfficientFormerForImageClassification" ], "sha": "ebadb628e12f268e321fcc756fa4606f7b5b3178" }, "EfficientFormerForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerForImageClassificationWithTeacher" ], "sha": "1beabce6da9cb4ebbeafcd1ef23fac36b4a269e2" }, "EfficientFormerModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerModel", "TFEfficientFormerModel" ], "sha": "200fae5b875844d09c8a91d1c155b72b06a517f6" }, "EfficientNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetForImageClassification" ], "sha": "6ed195ee636d2c0b885139da8c7b45d57ebaeee0" }, "EfficientNetModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetModel" ], "sha": 
"eb03c90d4aaad98af0f19e0dfbdc41106297ffff" }, "ElectraForCausalLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForCausalLM" ], "sha": "c78396bc8cdd8db247892339de8da80d691d1d04" }, "ElectraForMaskedLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMaskedLM", "TFElectraForMaskedLM" ], "sha": "631337703dbd8d41904c39891a41c6f1edd31813" }, "ElectraForMultipleChoice": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMultipleChoice", "TFElectraForMultipleChoice" ], "sha": "66fdea6e22cfcbd3caa49ea82f31871c460612fa" }, "ElectraForPreTraining": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForPreTraining", "TFElectraForPreTraining" ], "sha": "7b2d0fa8726b1180c7d6cde4f4afc3800eba7e6f" }, "ElectraForQuestionAnswering": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForQuestionAnswering", "TFElectraForQuestionAnswering" ], "sha": "c6b127fd9f3019462e4ca2373762836207e39ce2" }, "ElectraForSequenceClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForSequenceClassification", "TFElectraForSequenceClassification" ], "sha": "41f0089ab7876abe0e28dbbd565144acb31f8127" }, "ElectraForTokenClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForTokenClassification", "TFElectraForTokenClassification" ], "sha": "1fdbbe70c1ddd16503820a1443d6a379a15ed777" }, "ElectraModel": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraModel", "TFElectraModel" ], "sha": "312b532cbef26610d80f2bd008650160cae4f7a1" }, "EncodecModel": { "tokenizer_classes": [], "processor_classes": [ "EncodecFeatureExtractor" ], "model_classes": [ "EncodecModel" ], "sha": "e14c5a2fd6529c85cd4ac5a05ee9e550ced6a006" }, "EncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "EncoderDecoderModel", "TFEncoderDecoderModel" ], "sha": "1038be9fd1b87b2e0a8f33721ff8e4612d34b3b6" }, "ErnieForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForCausalLM" ], "sha": "b49e00112ff06c2f0a0e54499921dddcf8c3c6a8" }, "ErnieForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMaskedLM" ], "sha": "30429830d1997222d885dcfdbd36d5e02d0d34b1" }, "ErnieForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMultipleChoice" ], "sha": "5a21144bf35dfb60560ff8249116ad4459c0069a" }, "ErnieForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForNextSentencePrediction" ], "sha": "ed5868efb39bf6afb29f0cf444deafcf1e50b5bc" }, "ErnieForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForPreTraining" ], "sha": "e4ad30d291c310fea25e6f91f91393f993513b42" }, "ErnieForQuestionAnswering": { 
"tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForQuestionAnswering" ], "sha": "fe7c74b763f63a9fd864dad325385075df7c80c8" }, "ErnieForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForSequenceClassification" ], "sha": "84e0be05fcd52f54e96a69f67a2481323a58a9db" }, "ErnieForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForTokenClassification" ], "sha": "91cf62c43a5a83332552ffa2d8e5e44d63a224ea" }, "ErnieMForMultipleChoice": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForMultipleChoice" ], "sha": "c42ee7fcb132a323ace314c32e63c8a7d36ce18f" }, "ErnieMForQuestionAnswering": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForQuestionAnswering" ], "sha": "2b90dee75ca87b214f96db00002aa18244ec8e84" }, "ErnieMForSequenceClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForSequenceClassification" ], "sha": "d8368646d8b1c67b1460af9c6ec13fd9d894cae6" }, "ErnieMForTokenClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForTokenClassification" ], "sha": "a9e29ba60fa0b7bedc2ed26a6b9911427df1ca6b" }, "ErnieMModel": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMModel" ], "sha": "7306eac3f38c3cf6211f0e741fdb81c6cc92bc09" }, "ErnieModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieModel" ], "sha": "b51478a9f40e353c41be3a29ccef103dcfe22b4b" }, "EsmForMaskedLM": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForMaskedLM", "TFEsmForMaskedLM" ], "sha": "b56297b6cd64b9ba7c613d0cd146f1ecbea8115e" }, "EsmForSequenceClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForSequenceClassification", "TFEsmForSequenceClassification" ], "sha": "cc6d7ef0a4763540d67b7a4fb31bede9a7d3f245" }, "EsmForTokenClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForTokenClassification", "TFEsmForTokenClassification" ], "sha": "498953f66e260b974c504abbc863ee266d6c84a9" }, "EsmModel": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmModel", "TFEsmModel" ], "sha": "183838263b70809310117a0761542501acf64c21" }, "FNetForMaskedLM": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMaskedLM" ], "sha": "91eaae1eac894af5d96c0221ec9bcef7f1af41c8" }, "FNetForMultipleChoice": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMultipleChoice" ], "sha": "c15d98d5f7a6f3ef3099b1257949bee208d5466e" }, "FNetForNextSentencePrediction": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForNextSentencePrediction" ], "sha": "c59440b44d07d61fc45a90ded7fc11d6f25b143d" }, "FNetForPreTraining": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForPreTraining" ], "sha": "c05f55ccfb2f2533babd3c6e99de7749bc8081da" }, 
"FNetForQuestionAnswering": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForQuestionAnswering" ], "sha": "47788e49dd435653fa2aa4b3ccae3572a870758e" }, "FNetForSequenceClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForSequenceClassification" ], "sha": "a3049b896ea6c5a32c364989c3afe604ee58b9fc" }, "FNetForTokenClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForTokenClassification" ], "sha": "3bcdafca57d544bb81e2f7eead1e512c168582fc" }, "FNetModel": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetModel" ], "sha": "48fa66de37df126504db3b658806135eb877f505" }, "FSMTForConditionalGeneration": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTForConditionalGeneration" ], "sha": "6a1a981b29c8a98c1fd31bd0ad809f5575ca6c7a" }, "FSMTModel": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTModel" ], "sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c" }, "FalconForCausalLM": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForCausalLM" ], "sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a" }, "FalconForQuestionAnswering": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForQuestionAnswering" ], "sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788" }, "FalconForSequenceClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForSequenceClassification" ], "sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa" }, "FalconForTokenClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForTokenClassification" ], "sha": "0ea6ae548773daa6e3317fddc058957e956eebf4" }, "FalconModel": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconModel" ], "sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84" }, "FlaubertForMultipleChoice": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForMultipleChoice", "TFFlaubertForMultipleChoice" ], "sha": "8b12bd87a63f2e86c3482431742f6d8abf6ec4fd" }, "FlaubertForQuestionAnsweringSimple": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForQuestionAnsweringSimple", "TFFlaubertForQuestionAnsweringSimple" ], "sha": "5c0e7ad1efae7e3497f5cd6d2d9519403df49d37" }, "FlaubertForSequenceClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForSequenceClassification", "TFFlaubertForSequenceClassification" ], "sha": "762f12a8c99690be8ed2663b7af3011660174a7c" }, "FlaubertForTokenClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForTokenClassification", "TFFlaubertForTokenClassification" ], "sha": "d2ab741c937bb69ef27c89e4c86a8c9d444874ca" }, "FlaubertModel": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertModel", "TFFlaubertModel" ], "sha": "bdc2f8e17bb869393053429ec8c1c842bfeabb07" }, "FlaubertWithLMHeadModel": { "tokenizer_classes": [ "FlaubertTokenizer" 
], "processor_classes": [], "model_classes": [ "FlaubertWithLMHeadModel", "TFFlaubertWithLMHeadModel" ], "sha": "f20eb0932c90061003c9cc4e109c6ea22559c4f2" }, "FlavaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaForPreTraining" ], "sha": "6e9b2094060a5fa27984c7b49e5d0e820a88b487" }, "FlavaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaModel" ], "sha": "31ebf1b7a0ef1fd5059b98e28e5ab1c366d2c482" }, "FocalNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetBackbone" ], "sha": "eb8c580969443cb87de7dd9a256deaface03692f" }, "FocalNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForImageClassification" ], "sha": "28d30ded26a3213e8fb7011a455afc3aa98b0a95" }, "FocalNetForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForMaskedImageModeling" ], "sha": "0ea7626d19c9dd2f3113d977f643a1babc720bd3" }, "FocalNetModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetModel" ], "sha": "107b004e6aa14108a359b7d22bdb9aa141ec05d5" }, "FunnelBaseModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelBaseModel", "TFFunnelBaseModel" ], "sha": "87fed4252812df23315a56531625333e315681c6" }, "FunnelForMaskedLM": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMaskedLM", "TFFunnelForMaskedLM" ], "sha": "5543daf29f185cd45f2599bd6f38c96064c9c8de" }, "FunnelForMultipleChoice": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMultipleChoice", "TFFunnelForMultipleChoice" ], "sha": "a8bf597e37dbefb1ac5c97c4cb162c3d522a33a1" }, "FunnelForPreTraining": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForPreTraining", "TFFunnelForPreTraining" ], "sha": "cbcb300d60aacd5950a45409b6e3f0f240c9082e" }, "FunnelForQuestionAnswering": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForQuestionAnswering", "TFFunnelForQuestionAnswering" ], "sha": "6a5675305e096434e818486a13892cb55daffd13" }, "FunnelForSequenceClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForSequenceClassification", "TFFunnelForSequenceClassification" ], "sha": "1bc557a1e4314da21a44dee57b799e95a7025e5c" }, "FunnelForTokenClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForTokenClassification", "TFFunnelForTokenClassification" ], "sha": "693bc1217a224efd558f410ddc8ffc63739bebc3" }, "FunnelModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelModel", "TFFunnelModel" ], "sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76" }, "FuyuForCausalLM": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "FuyuImageProcessor" ], "model_classes": [ "FuyuForCausalLM" ], "sha": 
"685d78258ea95c5c82e0e4555d0d4a2270ab8bff" }, "GLPNForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNForDepthEstimation" ], "sha": "32ca1c1ef5d33242e5e7c0433bcd773c082f0260" }, "GLPNModel": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNModel" ], "sha": "24a8dbb48b1aa0ba2eba44324fcd0c78cca64dd4" }, "GPT2ForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForQuestionAnswering" ], "sha": "a5bdd6bd4d79feece85ea9a8bd4ee5fe54c1d45b" }, "GPT2ForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForSequenceClassification", "TFGPT2ForSequenceClassification" ], "sha": "90a2d78e5c7f288152f8456c3d58a43b40a58449" }, "GPT2ForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForTokenClassification" ], "sha": "da78bc95b45fab2da9d43f2ca27164996e31ade1" }, "GPT2LMHeadModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2LMHeadModel", "TFGPT2LMHeadModel" ], "sha": "78f56535d4ce19e9d7c0992e390085c5a4196b37" }, "GPT2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2Model", "TFGPT2Model" ], "sha": "d6694b0d8fe17978761c9305dc151780506b192e" }, "GPTBigCodeForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForCausalLM" ], "sha": "99f7aaadf9c29669c63ef6c16f6bc5c07dbb9126" }, "GPTBigCodeForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForSequenceClassification" ], "sha": "64a7398d5763161037b818314c60dd83d93d03e9" }, "GPTBigCodeForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForTokenClassification" ], "sha": "310537ecd22d45f71bf594b17922cf2abc338eaf" }, "GPTBigCodeModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeModel" ], "sha": "3069419084a9dc36802d47de9df3d314ccfc2f28" }, "GPTJForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForCausalLM", "TFGPTJForCausalLM" ], "sha": "1fff390baa45cb187903ebdd269c975bb9ed7386" }, "GPTJForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForQuestionAnswering", "TFGPTJForQuestionAnswering" ], "sha": "3d4ec61dbed01f844d4c309971eeb5ad722c6c84" }, "GPTJForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForSequenceClassification", "TFGPTJForSequenceClassification" ], "sha": "4b5db259cd16ca84ae2cd79aa4851cdd14479128" }, "GPTJModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJModel", "TFGPTJModel" ], "sha": "d8e1db30d08fbf57da6fc139aea3ffd63ab6226e" }, "GPTNeoForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForCausalLM" ], "sha": 
"e88934e402c15195dd99b2947632415dd7645268" }, "GPTNeoForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForQuestionAnswering" ], "sha": "623883e94bd08caf9b3f839b98debeea72d5bc2b" }, "GPTNeoForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForSequenceClassification" ], "sha": "bf2090d5d91a70eb37ba51fbdcf23afc7031fea8" }, "GPTNeoForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForTokenClassification" ], "sha": "d5208e73e24a1671219776b50fe5f96e0e4cd218" }, "GPTNeoModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoModel" ], "sha": "72a7cd49da613c3125a90884df4763545c594e56" }, "GPTNeoXForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForCausalLM" ], "sha": "0229cfaaa843c6b492ac2abffabb00f1ff1936f8" }, "GPTNeoXForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForQuestionAnswering" ], "sha": "7d2f08c959c211129952ee03b5562add09fe6864" }, "GPTNeoXForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForSequenceClassification" ], "sha": "17c4b845ee2e0bb780ca2dea2d59a3d9d5d3c651" }, "GPTNeoXForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForTokenClassification" ], "sha": "3aa4fe8a562f32230041d6d3616aa5ecc3f30192" }, "GPTNeoXJapaneseForCausalLM": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseForCausalLM" ], "sha": "5fca2479f1064fd22e17f944c8fcc14f7e73f1d5" }, "GPTNeoXJapaneseModel": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseModel" ], "sha": "5c6ed124150df845cfc701d70b97fdcde687be52" }, "GPTNeoXModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXModel" ], "sha": "33114ba2f72189d5a2bd63f0cdb78551189242ff" }, "GPTSanJapaneseForConditionalGeneration": { "tokenizer_classes": [ "GPTSanJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTSanJapaneseForConditionalGeneration" ], "sha": "ff6a41faaa713c7fbd5d9a1a50539745f9e1178e" }, "GitForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitForCausalLM" ], "sha": "60f9c50466ae0beeb11776ca5bfeb6473f441554" }, "GitModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitModel" ], "sha": "3d2eb6bddf95bb4a4e59b045d4e464c730c07f41" }, "GroupViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GroupViTModel", "TFGroupViTModel" ], "sha": "05a3a02dd46cb9eb078608dec98f633c0cf559ef" }, "HubertForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForCTC" ], "sha": "13431b76106f993eedcff48a75bae590a09b14f7" }, "HubertForSequenceClassification": { "tokenizer_classes": [ 
"Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForSequenceClassification" ], "sha": "d23f46607a900b1a55dfee4b7ed205a6823035b1" }, "HubertModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertModel", "TFHubertModel" ], "sha": "3224562c86c4669db65ae7defdc5fb555b113e95" }, "IBertForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMaskedLM" ], "sha": "e333a9c9d375f4d839b7e9e21d1a1c8dad58d7d1" }, "IBertForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMultipleChoice" ], "sha": "a81f7d64cd7ce5fe6cd726b23d9d14ac5d17bf53" }, "IBertForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForQuestionAnswering" ], "sha": "7b66d13d4d6801a82cbeb7f9fd853ca1630d1f8b" }, "IBertForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForSequenceClassification" ], "sha": "309d57145c40f889222fe5df62f14dddf4496b38" }, "IBertForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForTokenClassification" ], "sha": "b032e9bff4b081b78c098b2d8bc610ac035c6ddf" }, "IBertModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertModel" ], "sha": "6749164c678d4883d455f98b1dfc98c62da8f08b" }, "IdeficsForVisionText2Text": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsForVisionText2Text" ], "sha": "2c2f2e2cd6b02a77d0cdd8c3767ba9a6267dbd20" }, "IdeficsModel": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsModel" ], "sha": "649df2e35e067efd573ff2d083784a5cf876545e" }, "ImageGPTForCausalImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForCausalImageModeling" ], "sha": "9a7d1fc04439ab1d9d690de9c3e7673f08568cdf" }, "ImageGPTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForImageClassification" ], "sha": "d92c7aed4ba5de74a1f542b736010090e4a58b42" }, "ImageGPTModel": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTModel" ], "sha": "5a7983e48d5841704733dd0756177680ed50c074" }, "Kosmos2ForConditionalGeneration": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2ForConditionalGeneration" ], "sha": "d1d4607782b911411676f1ee79997dee645def58" }, "Kosmos2Model": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2Model" ], "sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e" }, "LEDForConditionalGeneration": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForConditionalGeneration", "TFLEDForConditionalGeneration" ], "sha": "a354b49a79351f3ea8ae7776d9f8352ae26cfc14" }, "LEDForQuestionAnswering": { 
"tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForQuestionAnswering" ], "sha": "47c7a75a1e650dae60ff6e9bbab0f2386946670c" }, "LEDForSequenceClassification": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForSequenceClassification" ], "sha": "3571e2c9d9f2f2ec0b8fe47090330b128be05126" }, "LEDModel": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDModel", "TFLEDModel" ], "sha": "3c3f6eb142545afc570187bfdabfe65d43dafbe4" }, "LayoutLMForMaskedLM": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForMaskedLM", "TFLayoutLMForMaskedLM" ], "sha": "0368bd9bd8fd3eb43b8a3b38962b5345b8765514" }, "LayoutLMForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForQuestionAnswering", "TFLayoutLMForQuestionAnswering" ], "sha": "0d6a4bc614fccfa313c1fb6d132a250929518f85" }, "LayoutLMForSequenceClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForSequenceClassification", "TFLayoutLMForSequenceClassification" ], "sha": "1bd68c73dbf6c8c0526d24fbe2831be82998c440" }, "LayoutLMForTokenClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForTokenClassification", "TFLayoutLMForTokenClassification" ], "sha": "155e7da3f1d786aa39d957b16080c52de4a7efd7" }, "LayoutLMModel": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMModel", "TFLayoutLMModel" ], "sha": "14f77b30d267910f11f0fd532a91a6b85ab3a4de" }, "LayoutLMv2ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForQuestionAnswering" ], "sha": "f452e28dd34d3c38cce046b1cc7b0ada69f587b1" }, "LayoutLMv2ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForSequenceClassification" ], "sha": "b483e08fd143113629ecda3dbfd57e69bfeb5f11" }, "LayoutLMv2ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForTokenClassification" ], "sha": "0721ae69bff00ecfff1b3d1521a475cde0253299" }, "LayoutLMv2Model": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2Model" ], "sha": "6a1b510769b344979a910a7d0bade613a9ec2dfc" }, "LayoutLMv3ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForQuestionAnswering" ], "sha": "4640242388e69cf77ea2dd3ac36ec6f1b26628c8" }, "LayoutLMv3ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForSequenceClassification", 
"TFLayoutLMv3ForSequenceClassification" ], "sha": "96515f699874cfbfbec7a64c539ae92419e4c6dc" }, "LayoutLMv3ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForTokenClassification", "TFLayoutLMv3ForTokenClassification" ], "sha": "ed4ffc464f2028fe50dfc6823f4eda78d34be7e6" }, "LayoutLMv3Model": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3Model", "TFLayoutLMv3Model" ], "sha": "69725e5e2445e5c1c3aa8a2aa49cfd72e0a44565" }, "LevitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassification" ], "sha": "5ae8ccaa1fe1c947cb8ae6499e4a150c668bb9f0" }, "LevitForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassificationWithTeacher" ], "sha": "568cc0d965b9bd293f240e7724314db6d50f6722" }, "LevitModel": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitModel" ], "sha": "172efa52b50c75c3b3e498fa638f55e65b2ebf87" }, "LiltForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForQuestionAnswering" ], "sha": "0a348441999e98ec003b29fc4d5a67ad22ee6ca2" }, "LiltForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForSequenceClassification" ], "sha": "c53ab0ba33536fe564a4a1e4f1674d990c01b83a" }, "LiltForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForTokenClassification" ], "sha": "14f85076f9b3f7016917e324d51ebd22511a2ae5" }, "LiltModel": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltModel" ], "sha": "3f1166cc14c532388df7e82336a8e575a813bd3f" }, "LongT5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5ForConditionalGeneration" ], "sha": "c685cbbe706ad5c9a28689631765726a1874dcc7" }, "LongT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5Model" ], "sha": "6b468e55e2490565e6155690201086ac00c72062" }, "LongformerForMaskedLM": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMaskedLM", "TFLongformerForMaskedLM" ], "sha": "929d3bda9a1485d9bae41f9dbfc1d149c1c4e78e" }, "LongformerForMultipleChoice": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMultipleChoice", "TFLongformerForMultipleChoice" ], "sha": "60b1ecac6b9385ce18c7e6978ab161cce8e7f9d4" }, "LongformerForQuestionAnswering": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForQuestionAnswering", "TFLongformerForQuestionAnswering" ], "sha": "be45ab1321b703f2200cbbcae560aaf2e2afef88" }, "LongformerForSequenceClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "LongformerForSequenceClassification", "TFLongformerForSequenceClassification" ], "sha": "8bc0de0b0f740bf397eb2770ec3ce3a24f3d7af9" }, "LongformerForTokenClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForTokenClassification", "TFLongformerForTokenClassification" ], "sha": "efa33a9b6f47f0f7979af08ae8d04a5a7363a14b" }, "LongformerModel": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerModel", "TFLongformerModel" ], "sha": "b023d531688e8655fc09300ac36742588efb3240" }, "LukeForMaskedLM": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMaskedLM" ], "sha": "954cf6cd2bf1f298a3956b10c36656c57387506d" }, "LukeForMultipleChoice": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMultipleChoice" ], "sha": "d1310a9174ad50d60b30ad6049e165deb2539034" }, "LukeForQuestionAnswering": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForQuestionAnswering" ], "sha": "3ea38da4e32cb4e45bea82b2e81a8639aeba2c35" }, "LukeForSequenceClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForSequenceClassification" ], "sha": "b5b11248aeb4f5976379d15a977aeb2677e0c0f9" }, "LukeForTokenClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForTokenClassification" ], "sha": "8aab1a33ad26a344a6f4dfd68630e9661e174471" }, "LukeModel": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeModel" ], "sha": "ae23a674e7297d41f33c9af86e039757dfd2d531" }, "LxmertForPreTraining": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForPreTraining", "TFLxmertForPreTraining" ], "sha": "7b0843403c187aef00f20d5087086468d9613d2c" }, "LxmertForQuestionAnswering": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForQuestionAnswering" ], "sha": "27a74bd2cd156e46656c43ceb432c4deda0df5c1" }, "LxmertModel": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertModel", "TFLxmertModel" ], "sha": "97612a0d6b14406ea9bfd7672e6974e0961cbef1" }, "M2M100ForConditionalGeneration": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100ForConditionalGeneration" ], "sha": "32ac347092d51f658b41ffc111b67d49acdeab46" }, "M2M100Model": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100Model" ], "sha": "e95c2ae168c7ba19f8114def40e1b1edd953b2f5" }, "MBartForCausalLM": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForCausalLM" ], "sha": "a45044f8056328d20a764356eca3d0746a7a195e" }, "MBartForConditionalGeneration": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForConditionalGeneration", "TFMBartForConditionalGeneration" ], "sha": "171e918962d6c0ee56c6b070858e19e16c8dd09f" }, "MBartForQuestionAnswering": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ 
"MBartForQuestionAnswering" ], "sha": "1ee08565d24777335595e0d2940e454abdcff731" }, "MBartForSequenceClassification": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForSequenceClassification" ], "sha": "53e9c88ecfa2475d27afe099ffa7a8bcdb7ef7e4" }, "MBartModel": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartModel", "TFMBartModel" ], "sha": "2d492b34d69dd63b411990d5c8bb692fd637e91c" }, "MCTCTForCTC": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTForCTC" ], "sha": "895a3d74f87b344b1f0a71eae4f085941d51b5cf" }, "MCTCTModel": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTModel" ], "sha": "ce73d5c2b6fe163de778697d7b0543bf00d7ffa8" }, "MPNetForMaskedLM": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMaskedLM", "TFMPNetForMaskedLM" ], "sha": "50af96e7d0202aef86e396c136e4c4fde8afe183" }, "MPNetForMultipleChoice": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMultipleChoice", "TFMPNetForMultipleChoice" ], "sha": "af4ff8bf296a3a51f5ab6cd9f56741e4c732487c" }, "MPNetForQuestionAnswering": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForQuestionAnswering", "TFMPNetForQuestionAnswering" ], "sha": "3e1a25c0d3243f78f81580c312ada3b39c06b428" }, "MPNetForSequenceClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForSequenceClassification", "TFMPNetForSequenceClassification" ], "sha": "43da45c0a0d73c5a5567b4c7ec512ec5023e52dd" }, "MPNetForTokenClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForTokenClassification", "TFMPNetForTokenClassification" ], "sha": "4e825eff24df533321ebab823eb66ce67e4ab3d9" }, "MPNetModel": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetModel", "TFMPNetModel" ], "sha": "847c68344c2922e9a71fa8835b87a0f6f72b9f47" }, "MarianForCausalLM": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [], "sha": "5fb205e6db8e18e3c6cdd4e4709be292ba4599f3" }, "MarianMTModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianMTModel", "TFMarianMTModel" ], "sha": "0405f542b31561592231a86e3009d05256cbf49f" }, "MarianModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianModel", "TFMarianModel" ], "sha": "3649748c0286c6d5179a7013a716f7314db182a8" }, "MarkupLMForQuestionAnswering": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForQuestionAnswering" ], "sha": "c8bb9f93591d980362547b0bdca9f23ace2f383e" }, "MarkupLMForSequenceClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForSequenceClassification" ], "sha": "c2cb7245d68d76e0a5f993fc8a3de099ecebc68b" }, "MarkupLMForTokenClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", 
"MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForTokenClassification" ], "sha": "b9f924e82f400de0b34b46ee4ba276d686bd4890" }, "MarkupLMModel": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMModel" ], "sha": "9687ba29f1c59d978e3d4b0fa702031f88eff53b" }, "Mask2FormerForUniversalSegmentation": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerForUniversalSegmentation" ], "sha": "6429a7349527c9ef140ae691b83c47702cce1bc0" }, "Mask2FormerModel": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerModel" ], "sha": "9bee8709204024b3669d503cdfe8890182f2a075" }, "MaskFormerForInstanceSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerForInstanceSegmentation" ], "sha": "f844aaa81f55cb199c115f1bf95c217a70685570" }, "MaskFormerModel": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerModel" ], "sha": "473b54a464bc0ccee29bc23b4f6610f32eec05af" }, "MegaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForCausalLM" ], "sha": "6642b9da860f8b62abcfb0660feabcebf6698418" }, "MegaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMaskedLM" ], "sha": "6b2d47ba03bec9e6f7eefdd4a67351fa191aae6f" }, "MegaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMultipleChoice" ], "sha": "2b1e751da36a4410473eef07a62b09227a26d504" }, "MegaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForQuestionAnswering" ], "sha": "612acd9a53c351c42514adb3c04f2057d2870be7" }, "MegaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForSequenceClassification" ], "sha": "4871572da1613b7e9cfd3640c6d1129af004eefb" }, "MegaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForTokenClassification" ], "sha": "450d3722c3b995215d06b9c12544c99f958581c7" }, "MegaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaModel" ], "sha": "ca0862db27428893fe22f9bb5d2eb0875c2156f3" }, "MegatronBertForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForCausalLM" ], "sha": "ff08d05ef8f98fdccf1f01560ec6ec4adbc8a3e3" }, "MegatronBertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMaskedLM" ], "sha": "2ed25e2681d26b51b404ef1347a385c5f2c86a9a" }, "MegatronBertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMultipleChoice" ], "sha": "1485af4b75f8f234d2b4b5aea50ab2ec55223a15" }, "MegatronBertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", 
"BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForNextSentencePrediction" ], "sha": "52bc9ee1d5145344f66b088ed278f07ed3d90584" }, "MegatronBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForPreTraining" ], "sha": "e580d0efd54e1c92789e39b32929234e36ee427f" }, "MegatronBertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForQuestionAnswering" ], "sha": "7342ba042a3c30c15382d00fcb0521533fc43841" }, "MegatronBertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForSequenceClassification" ], "sha": "6a7cd480511d817a1e221c8f7558c55a93baed1b" }, "MegatronBertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForTokenClassification" ], "sha": "8b5334b6ec5f025293ca861de474b57ca84bc005" }, "MegatronBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertModel" ], "sha": "f2457fbe535ba97ea13db049f53618b42e13f047" }, "MgpstrForSceneTextRecognition": { "tokenizer_classes": [], "processor_classes": [ "MgpstrProcessor" ], "model_classes": [ "MgpstrForSceneTextRecognition" ], "sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a" }, "MistralForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForCausalLM" ], "sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b" }, "MistralForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForSequenceClassification" ], "sha": "65045444ea1933309270d8b08b21d3fa94a84290" }, "MistralModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralModel" ], "sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac" }, "MobileBertForMaskedLM": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMaskedLM", "TFMobileBertForMaskedLM" ], "sha": "d689e737d73ad23aed3aabd3177591fc827d1c62" }, "MobileBertForMultipleChoice": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMultipleChoice", "TFMobileBertForMultipleChoice" ], "sha": "403d1f88be7eb0c769ff3a8e57eab21cc3e75afb" }, "MobileBertForNextSentencePrediction": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForNextSentencePrediction", "TFMobileBertForNextSentencePrediction" ], "sha": "b4d8836a0f259ee3bca9f230093836c9117c5e4d" }, "MobileBertForPreTraining": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForPreTraining", "TFMobileBertForPreTraining" ], "sha": "fbaa13ea6f9fcebb9fde620dd009d12510440d17" }, "MobileBertForQuestionAnswering": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForQuestionAnswering", "TFMobileBertForQuestionAnswering" ], "sha": "ba6a55cf2daec55bfb220c9bab0bc4ad96510087" }, 
"MobileBertForSequenceClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForSequenceClassification", "TFMobileBertForSequenceClassification" ], "sha": "17ab35603bec351457e035eef2d0426538071f72" }, "MobileBertForTokenClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForTokenClassification", "TFMobileBertForTokenClassification" ], "sha": "dee83e820e6c4f069886a5d1875bf6775897313e" }, "MobileBertModel": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertModel", "TFMobileBertModel" ], "sha": "09b2db33ea798a762eeaf7e727e95f9ea8a6d14f" }, "MobileNetV1ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1ForImageClassification" ], "sha": "55023dbd0935f147bf1bccf960cea01ca07e0f0c" }, "MobileNetV1Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1Model" ], "sha": "178bd24528147a028938d6ee5c7e65c969ea37b0" }, "MobileNetV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForImageClassification" ], "sha": "ff907f740cf9ea91bc3cdf403a94ae28fbb2548a" }, "MobileNetV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForSemanticSegmentation" ], "sha": "48adbc340e42882f52b54d4f5dd045e16e9ef2d6" }, "MobileNetV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2Model" ], "sha": "e876885828825472a80ef1796d89d60b901813ba" }, "MobileViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForImageClassification", "TFMobileViTForImageClassification" ], "sha": "7d0b31864f856e00f9e34e8c6781dcc7a8cdaf1e" }, "MobileViTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForSemanticSegmentation", "TFMobileViTForSemanticSegmentation" ], "sha": "215f727caa3c3fc94fa4df486aa706e5d99d4194" }, "MobileViTModel": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTModel", "TFMobileViTModel" ], "sha": "b3a1452e7cb44b600b21ee14f3d5382366855a46" }, "MobileViTV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForImageClassification" ], "sha": "25752b0967ad594341d1b685401450d7f698433c" }, "MobileViTV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForSemanticSegmentation" ], "sha": "13b953f50be33219d55a12f1098be38b88000897" }, "MobileViTV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2Model" ], "sha": "2f46357659db2d6d54d870e28073deeea1c8cb64" }, "MptForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForCausalLM" ], "sha": "500c869b956c65f6b1a7b4867727f124c6f5728a" }, "MptForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "MptForQuestionAnswering" ], "sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9" }, "MptForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForSequenceClassification" ], "sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a" }, "MptForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForTokenClassification" ], "sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246" }, "MptModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptModel" ], "sha": "ea747f234556661b0c8b84a626f267066ce586bf" }, "MraForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMaskedLM" ], "sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c" }, "MraForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMultipleChoice" ], "sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74" }, "MraForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForQuestionAnswering" ], "sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0" }, "MraForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForSequenceClassification" ], "sha": "f47672d3708508bda7774215bee44a92ec16ab2f" }, "MraForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForTokenClassification" ], "sha": "f0961ab5818bca473607fb94b391c186dc1d3492" }, "MraModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraModel" ], "sha": "315f34f30bcc4b0b66b11987726df2a80c50e271" }, "MusicgenForCausalLM": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "f67d387eaaa7c71ddf88af95eda4bf14ace08d49" }, "MusicgenForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "MusicgenForConditionalGeneration" ], "sha": "16102cdf580e70cf0b4e0e2cda5bc75b934da92c" }, "MvpForCausalLM": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForCausalLM" ], "sha": "105e5f2c8a0f20d404cb71795539cda5dd49716d" }, "MvpForConditionalGeneration": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForConditionalGeneration" ], "sha": "b0b706f14b2f8aae288cba30ae0064e0be7e888b" }, "MvpForQuestionAnswering": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForQuestionAnswering" ], "sha": "82f152b36a40a4c22edcb146e6eaec636d84fa2d" }, "MvpForSequenceClassification": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForSequenceClassification" ], "sha": "506b68544d064001929ee9e6db3752e62972a6aa" }, "MvpModel": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpModel" ], "sha": "3f4653184721a2bc029b27706d335ef7ddd219d5" }, "NatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], 
"model_classes": [ "NatBackbone" ], "sha": "d5cc5eccba4da609c82e9f5c649301b9f9fee9fb" }, "NatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatForImageClassification" ], "sha": "2ff4c9e73c49c392c02a467e87b5511fd924242a" }, "NatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatModel" ], "sha": "75e9756bb94d0ccdce98a8e963eeecbc66f9d573" }, "NezhaForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMaskedLM" ], "sha": "5991cca4b78f0ed7299259a71f3eeed3f3452b72" }, "NezhaForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMultipleChoice" ], "sha": "0f6e9ec791d85ad4503acdec50b3a120f984016b" }, "NezhaForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForNextSentencePrediction" ], "sha": "9a34316c14ec8ecc98ff08e46760915c80098a57" }, "NezhaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForPreTraining" ], "sha": "6259db427a0073061de352ea819d38a74798edd7" }, "NezhaForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForQuestionAnswering" ], "sha": "31c6a34e85ae8c41294e0f4ef25044e00e511c4d" }, "NezhaForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForSequenceClassification" ], "sha": "db057c308ba2e05f223404de11e1816ce4bd62a9" }, "NezhaForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForTokenClassification" ], "sha": "235f4e10b4a59709650c2bece3e342ec153d9cfc" }, "NezhaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaModel" ], "sha": "80e05ba7c55bcdd7f4d1387ef9a09a7a8e95b5ac" }, "NllbMoeForConditionalGeneration": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeForConditionalGeneration" ], "sha": "2a7f87dffe826af3d52086888f3f3773246e5528" }, "NllbMoeModel": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeModel" ], "sha": "9f7a2261eed4658e1aa5623be4672ba64bee7da5" }, "NystromformerForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMaskedLM" ], "sha": "37036847783f1e65e81ecd43803270a1ecb276f3" }, "NystromformerForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMultipleChoice" ], "sha": "42a077d5ab6830e20560466eaccc525eff10c3ae" }, "NystromformerForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForQuestionAnswering" ], "sha": "1cfaf79051731824db4f09989f093f87f4fceec5" }, "NystromformerForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForSequenceClassification" ], "sha": 
"d75231203066df41e9b6b25dbee9ad40e8515c18" }, "NystromformerForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForTokenClassification" ], "sha": "5a499dc96e106bf41fc9166f2ad06527ec7ca14e" }, "NystromformerModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerModel" ], "sha": "2b6adb37ec473b15d71e2eb459acea08df6940ce" }, "OPTForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForCausalLM", "TFOPTForCausalLM" ], "sha": "190d1f4fc0011d2eaeaa05282e0fbd2445e4b11f" }, "OPTForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForQuestionAnswering" ], "sha": "0fa9277ce10dbc3d0922b354befb684a136af00b" }, "OPTForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForSequenceClassification" ], "sha": "784ab288ab7280b1853ee400ef10ee2a965df352" }, "OPTModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTModel", "TFOPTModel" ], "sha": "901d92b8f51edb0ec9614cb185fb66a8b5d364c3" }, "OneFormerForUniversalSegmentation": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerForUniversalSegmentation" ], "sha": "fee1cfd676acc40f09017702ddac6504f3090d14" }, "OneFormerModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerModel" ], "sha": "4163a79328c78f93ec57942598698a138c19a577" }, "OpenAIGPTForSequenceClassification": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTForSequenceClassification", "TFOpenAIGPTForSequenceClassification" ], "sha": "c513f7f952935085f7573bf70a1ac3ad8f33434c" }, "OpenAIGPTLMHeadModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTLMHeadModel", "TFOpenAIGPTLMHeadModel" ], "sha": "33f59ecd860f7a998483ec7631fe32d257235461" }, "OpenAIGPTModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTModel", "TFOpenAIGPTModel" ], "sha": "00f6ec0a3a5276af71d08a26199e0ccbf2556fc9" }, "OwlViTForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTForObjectDetection" ], "sha": "af958c9164f23d0f12921a8edf687f9aaa6af90e" }, "OwlViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTModel" ], "sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5" }, "Owlv2ForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2ForObjectDetection" ], "sha": "30439c0b2749726468dc13a755261e8101170052" }, "Owlv2Model": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2Model" ], "sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377" }, 
"PLBartForCausalLM": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForCausalLM" ], "sha": "6ee51133246dbdb18fc3681ebd62d21e421b9bb4" }, "PLBartForConditionalGeneration": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForConditionalGeneration" ], "sha": "ba191d28f4678d20b4dfed5fca5944018282cf20" }, "PLBartForSequenceClassification": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForSequenceClassification" ], "sha": "02063b3d9707fcff619a4e37a0d6e58f76e39b18" }, "PLBartModel": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartModel" ], "sha": "cfbba29169b3f40d800403fc1b53982e1f88c5f8" }, "PegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForCausalLM" ], "sha": "6e685a698302a3ba33e5379d3a37eb0bc1ae2f70" }, "PegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForConditionalGeneration", "TFPegasusForConditionalGeneration" ], "sha": "15e58ee2ebc14b6e80ef2891259057ee5f049be2" }, "PegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusModel", "TFPegasusModel" ], "sha": "fa36b24523db411ef77903453346b8be81ef73fe" }, "PegasusXForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXForConditionalGeneration" ], "sha": "7588a8120f26a36c1687c14bdf1e9f9656891c1a" }, "PegasusXModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXModel" ], "sha": "a0bdff627416ac3c39c22d081f5d88d8b8fd99cc" }, "PerceiverForImageClassificationConvProcessing": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationConvProcessing" ], "sha": "2c1e5e62ebc9d0c931adc8c665fb05bde6c1c1f1" }, "PerceiverForImageClassificationFourier": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationFourier" ], "sha": "88da41b8851b76b8be0dacdb3de023db02bb031a" }, "PerceiverForImageClassificationLearned": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationLearned" ], "sha": "879bd1fa38d3baddb027bb2cacba2d160a741375" }, "PerceiverForMaskedLM": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForMaskedLM" ], "sha": "1d2459cbd281ef72da5682e65102aaca96183045" }, "PerceiverForSequenceClassification": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForSequenceClassification" ], "sha": "576f1f96348f0343458499fbf53d4102b5c0f2ff" }, "PerceiverModel": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverModel" ], "sha": "83ec4d2d61ed62525ee033e13d144817beb29d19" }, "PersimmonForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "PersimmonForCausalLM" ], "sha": "454234d6496c3857f5bf3eafb784616e2cd3ea82" }, "PersimmonForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonForSequenceClassification" ], "sha": "1d2674846543a181ca67bafa8b8f3a48bd2eefd1" }, "PersimmonModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonModel" ], "sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859" }, "PhiForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForCausalLM" ], "sha": "3fecc0109a4a3a230e3a5509eaf47a26eba85d79" }, "PhiForSequenceClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForSequenceClassification" ], "sha": "e1c9f8ebf1317516acc1cd6338de71a53e770245" }, "PhiForTokenClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForTokenClassification" ], "sha": "d3a8054903753b5c96c05eaf9877905a116a1d5e" }, "PhiModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiModel" ], "sha": "99c38d5ce7ace35127d00ed3eeb3561308ea6b21" }, "Pix2StructForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [ "Pix2StructImageProcessor", "Pix2StructProcessor" ], "model_classes": [ "Pix2StructForConditionalGeneration" ], "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb" }, "PoolFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerForImageClassification" ], "sha": "ef04de5a6896100d457fb9553dd9789c09cca98e" }, "PoolFormerModel": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerModel" ], "sha": "e8037215ebdbf795329ef6525cdc6aa547f04ace" }, "ProphetNetForCausalLM": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForCausalLM" ], "sha": "d40b1e75bbc5ea0839563457aff6eee5bc0bb03e" }, "ProphetNetForConditionalGeneration": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForConditionalGeneration" ], "sha": "d842875c41278032af39c03c66902786bb5ff2c7" }, "ProphetNetModel": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetModel" ], "sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937" }, "PvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtForImageClassification" ], "sha": "589b37bd6941aff6dd248259f9eee3c422a41fde" }, "PvtModel": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtModel" ], "sha": "c40765c382515ae627652d60e9077b6478448d48" }, "ReformerForMaskedLM": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForMaskedLM" ], "sha": "1e6431e42c676b525e3215e9e3cc8f1404f9f82b" }, "ReformerForQuestionAnswering": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForQuestionAnswering" ], "sha": "62b43977f244474bd6982c6327d0c57310258fcd" }, 
"ReformerForSequenceClassification": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForSequenceClassification" ], "sha": "67bd534a990a7dcfa02406987e7f066caa2a30e8" }, "ReformerModel": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerModel" ], "sha": "a34ddb1389067448e9bc1323de674951cfb4cff1" }, "ReformerModelWithLMHead": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "e7a8addaea8407d4c55e144e48aee04be6cca618" }, "RegNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetForImageClassification", "TFRegNetForImageClassification" ], "sha": "5ec67c84fc7944c0c5b386bd26820bc4d1f3b32a" }, "RegNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetModel", "TFRegNetModel" ], "sha": "72375e1401dc8271d4abb6295c9cee376f7b8f1a" }, "RemBertForCausalLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForCausalLM", "TFRemBertForCausalLM" ], "sha": "8d9ae3d74a0e0a8958b4ee8c9dca3632abf52ef9" }, "RemBertForMaskedLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMaskedLM", "TFRemBertForMaskedLM" ], "sha": "b7c27d01e1cc3bef9ddd6a78627d700b3bffd759" }, "RemBertForMultipleChoice": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMultipleChoice", "TFRemBertForMultipleChoice" ], "sha": "2fe192677b9740cf24dd559339d46925e8ac23d4" }, "RemBertForQuestionAnswering": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForQuestionAnswering", "TFRemBertForQuestionAnswering" ], "sha": "22b8ba44681b96292a1cf7f6df4ba6bb7937ec6e" }, "RemBertForSequenceClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForSequenceClassification", "TFRemBertForSequenceClassification" ], "sha": "20f3e89341ea15266d2685a8798142fba03c3f98" }, "RemBertForTokenClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForTokenClassification", "TFRemBertForTokenClassification" ], "sha": "15712ff753708da3cf0550e76e73a5d0bba7784e" }, "RemBertModel": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertModel", "TFRemBertModel" ], "sha": "59cc6d099b1ded0aaead8684457415b129f79e86" }, "ResNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetBackbone" ], "sha": "c84a6bcf8af4b6a3403dea3cf4c55965ac39f239" }, "ResNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetForImageClassification", "TFResNetForImageClassification" ], "sha": "34a180ad24d80811d420d7aa4fbec4a17751aaf8" }, "ResNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetModel", "TFResNetModel" ], "sha": "fafa6cdf9986c6cfbae360596b3574162430bcd3" }, "RoCBertForCausalLM": { "tokenizer_classes": [ 
"RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForCausalLM" ], "sha": "194d8dafc4f4142f8d31e6b4be14b55d812f923b" }, "RoCBertForMaskedLM": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMaskedLM" ], "sha": "8bc285f32f3b932dbd56ddf91b1170734d638eeb" }, "RoCBertForMultipleChoice": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMultipleChoice" ], "sha": "bb54e5ae021d728022d34b12fee3f087d9486af9" }, "RoCBertForPreTraining": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForPreTraining" ], "sha": "86ebbd5b0bc84660ad7f505082eff19b86c137c8" }, "RoCBertForQuestionAnswering": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForQuestionAnswering" ], "sha": "1bfc2dc3d6e76170e6dca1ff32a54a0887ff28a3" }, "RoCBertForSequenceClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForSequenceClassification" ], "sha": "c329038802241f454273894128fea38b60f7c739" }, "RoCBertForTokenClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForTokenClassification" ], "sha": "afe5ec22c2ad1d9ff6e3e64c87eb7555faaa936d" }, "RoCBertModel": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertModel" ], "sha": "29de5580d5f5d3461a88673e7b4c492a9d8a67a4" }, "RoFormerForCausalLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForCausalLM", "TFRoFormerForCausalLM" ], "sha": "6e074219c6dd8f8b221bbfda64fba100f729f88d" }, "RoFormerForMaskedLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMaskedLM", "TFRoFormerForMaskedLM" ], "sha": "a3a4d05f9b29601553a77244f2adcf8194f9367c" }, "RoFormerForMultipleChoice": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMultipleChoice", "TFRoFormerForMultipleChoice" ], "sha": "aca3999a1d14f09644faed44e2cdfb28ed68a3d3" }, "RoFormerForQuestionAnswering": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForQuestionAnswering", "TFRoFormerForQuestionAnswering" ], "sha": "b8a20b3a788f178b9ef64e2eb9587f693dca1b69" }, "RoFormerForSequenceClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForSequenceClassification", "TFRoFormerForSequenceClassification" ], "sha": "d092e2d5e62012bf4ec921e763b37865d6189216" }, "RoFormerForTokenClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForTokenClassification", "TFRoFormerForTokenClassification" ], "sha": "85d3a17062e1f3e0539abfe738a88203e25349b6" }, "RoFormerModel": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerModel", "TFRoFormerModel" ], "sha": "22e7df2f4cd66caf449f2342f63d176005afccc9" }, "RobertaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForCausalLM", "TFRobertaForCausalLM" ], "sha": 
"5d1d24d56f9735402e50a2ea513ffde44487733e" }, "RobertaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMaskedLM", "TFRobertaForMaskedLM" ], "sha": "b21c9daf0b3b66530bf5d45d67df5ec392b5059c" }, "RobertaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMultipleChoice", "TFRobertaForMultipleChoice" ], "sha": "10020d9546d4d7318f4d514fe13daaad07e6269f" }, "RobertaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForQuestionAnswering", "TFRobertaForQuestionAnswering" ], "sha": "eea4a81306891746bac9e7715f805a2d9dbf4be7" }, "RobertaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForSequenceClassification", "TFRobertaForSequenceClassification" ], "sha": "6a6f53fc6ab98e29ed539e76b1cb76d25a2cd720" }, "RobertaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForTokenClassification", "TFRobertaForTokenClassification" ], "sha": "9190044c4091eb0d98ae7638c453e24846bca5d7" }, "RobertaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaModel", "TFRobertaModel" ], "sha": "181a0b8a7ad24500ec327ad07ddb225f0680ac0a" }, "RobertaPreLayerNormForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForCausalLM" ], "sha": "73b6d4531b41f295a5d310d7aa44736004a59865" }, "RobertaPreLayerNormForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMaskedLM" ], "sha": "a61723c77e5ab7adc95285e7823a0a49b99af395" }, "RobertaPreLayerNormForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForMultipleChoice" ], "sha": "3dcfa62e0771358c60232a18135bfe7c7f6d715e" }, "RobertaPreLayerNormForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForQuestionAnswering" ], "sha": "a8e76a5a50f7df60055e5ed6a1c3af2e7d34cf01" }, "RobertaPreLayerNormForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForSequenceClassification" ], "sha": "7509cb0286d146ef2fc6beb8867ae31b92fb1b16" }, "RobertaPreLayerNormForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormForTokenClassification" ], "sha": "3ad5814ba126b41e18c1978c970e396fab6da9bf" }, "RobertaPreLayerNormModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormModel", "TFRobertaPreLayerNormModel" ], "sha": 
"4830db38fd310404c5ab70bd00684eca0bc06ca8" }, "RwkvForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvForCausalLM" ], "sha": "2f452fd46b39e39b1a6a95fa1d8232405bbb3e96" }, "RwkvModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvModel" ], "sha": "88a52c9437dc3c06f65a8252490be7eb91197804" }, "SEWDForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForCTC" ], "sha": "5c7495c77ae9e0f12c0de05d3a5fb95bdcd91768" }, "SEWDForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForSequenceClassification" ], "sha": "d6cbf1164ce1999fdaf3deeb7a6eba19a3b1f873" }, "SEWDModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDModel" ], "sha": "dde4e02219449f149bb3403bbeae127cafaf9c79" }, "SEWForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForCTC" ], "sha": "4477c7a277059fba08772acf91cf3e3dd3cb073b" }, "SEWForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForSequenceClassification" ], "sha": "3b90fbb1c0c3848fed18f91a0169bb297a3e6619" }, "SEWModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWModel" ], "sha": "0a0fbb844eeefa0dce62bd05db30a2bb91e5dc88" }, "SamModel": { "tokenizer_classes": [], "processor_classes": [ "SamImageProcessor" ], "model_classes": [ "SamModel", "TFSamModel" ], "sha": "eca8651bc84e5ac3b1b62e784b744a6bd1b82575" }, "SegformerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForImageClassification", "TFSegformerForImageClassification" ], "sha": "c566ae0ed382be4ed61ed6dacffa2ba663e9cc19" }, "SegformerForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForSemanticSegmentation", "TFSegformerForSemanticSegmentation" ], "sha": "b73798972cdf24daafa858994713aca60e2bf90d" }, "SegformerModel": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerModel", "TFSegformerModel" ], "sha": "3d4ba8ed2bdf801e6afa855b9d77893f2b7f9e10" }, "Speech2TextForConditionalGeneration": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextForConditionalGeneration", "TFSpeech2TextForConditionalGeneration" ], "sha": "1da80293ec78762e136cf6dd64b652693f9ab364" }, "Speech2TextModel": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextModel", "TFSpeech2TextModel" ], "sha": "7c6e63bd0c15dd99ef01573d4c43f90e4920cc91" }, "SpeechEncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SpeechEncoderDecoderModel" ], "sha": "78602ae0857728e95de4042bdca8a31ef818890a" }, "SpeechT5ForSpeechToText": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], 
"model_classes": [ "SpeechT5ForSpeechToText" ], "sha": "d46f0a83324e5865420a27a738ef203292de3479" }, "SpeechT5ForTextToSpeech": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5ForTextToSpeech" ], "sha": "922e748d9e1ea256a8d9259782021cd3820d5924" }, "SpeechT5Model": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5Model" ], "sha": "7b248f77ca88ffddcdb538e772f6de63a86a4f9b" }, "SplinterForPreTraining": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForPreTraining" ], "sha": "e8a94efa740f1d685fa553f49132c6f022de5389" }, "SplinterForQuestionAnswering": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForQuestionAnswering" ], "sha": "d038b7b683face4a361ab0f474d8a5b111c44c4d" }, "SplinterModel": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterModel" ], "sha": "a35b13cbb7faba46dc265761bb839267eb53d248" }, "SqueezeBertForMaskedLM": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMaskedLM" ], "sha": "33ce239408c22d2c98be63c9ab4607ef9ceb6d49" }, "SqueezeBertForMultipleChoice": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMultipleChoice" ], "sha": "7e9e666896420c7839e27dcb280981d034ba4da5" }, "SqueezeBertForQuestionAnswering": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForQuestionAnswering" ], "sha": "bceb045a9ac6eb2ded7d358ed577c6dc28ea487a" }, "SqueezeBertForSequenceClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForSequenceClassification" ], "sha": "c5aeb1f454a1d059d41a5f8dacaf784b9de0b899" }, "SqueezeBertForTokenClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForTokenClassification" ], "sha": "70ba60ca44a380e6aa983a37b163c57217219df7" }, "SqueezeBertModel": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertModel" ], "sha": "e0a3ac56a4047da3f921638252ead5e44438bbdb" }, "SwiftFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerForImageClassification" ], "sha": "a249b14a525d29e675b6e4af4baacd9ba7df7598" }, "SwiftFormerModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerModel" ], "sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d" }, "Swin2SRForImageSuperResolution": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRForImageSuperResolution" ], "sha": "3a2780de0b455084c018ac8a62b56040969e26ec" }, "Swin2SRModel": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRModel" ], "sha": "c67f6ecff9ef8675c3869c987277b0a1e040f4be" }, "SwinBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinBackbone" ], "sha": 
"89b28b8ec05a7b3357be75a77eb7809e6fd5cfef" }, "SwinForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForImageClassification", "TFSwinForImageClassification" ], "sha": "e3c2e80f380ef79781313981da1a993dd8b8d34d" }, "SwinForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForMaskedImageModeling", "TFSwinForMaskedImageModeling" ], "sha": "d84b061fbace1bc6e697e3253e222de42053f978" }, "SwinModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinModel", "TFSwinModel" ], "sha": "23ff641295660ec4fea399be8aa1bc14565961f8" }, "Swinv2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForImageClassification" ], "sha": "3fd755cdf4cf611db83f72f9c9b00eb9257a38ca" }, "Swinv2ForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForMaskedImageModeling" ], "sha": "8375c31eb6231fde36ec6533a34ba5b28e296163" }, "Swinv2Model": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2Model" ], "sha": "70aeb72e8a266f668c8b51a517ec01003b8d6804" }, "SwitchTransformersForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersForConditionalGeneration" ], "sha": "c8fcd2bb735894c78db7f1e5b51afc78aced7adb" }, "SwitchTransformersModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersModel" ], "sha": "275bbf6d389bfd0540b9f824c609c6b22a577328" }, "T5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5EncoderModel", "TFT5EncoderModel" ], "sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8" }, "T5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForConditionalGeneration", "TFT5ForConditionalGeneration" ], "sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29" }, "T5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForQuestionAnswering" ], "sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94" }, "T5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForSequenceClassification" ], "sha": "105b5c4c8e1efe927444108f1388c4f102ebad15" }, "T5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5Model", "TFT5Model" ], "sha": "eb3d20dda0ba77c1de618d78116a1a0c784c515c" }, "TableTransformerForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerForObjectDetection" ], "sha": "9cf1e3f5c3555a727672a32b49f8b96c5aa20be6" }, "TableTransformerModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerModel" ], "sha": "7b446244d8739b0c29d98f7d537b15ad578577d5" }, "TapasForMaskedLM": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForMaskedLM", "TapasForMaskedLM" ], "sha": "2cedb92dd9a3dc37ffb7d35ad5190b110992577c" }, "TapasForQuestionAnswering": { "tokenizer_classes": 
[ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForQuestionAnswering", "TapasForQuestionAnswering" ], "sha": "4cc91b9e5db662e6e392d8052587ae419896d72b" }, "TapasForSequenceClassification": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForSequenceClassification", "TapasForSequenceClassification" ], "sha": "7c37bfb87a6fce2f8604bb3cab2a14e09a285e14" }, "TapasModel": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasModel", "TapasModel" ], "sha": "bc004af0a415afe1f566c3afe8dd4d48d08c1ce0" }, "TimesformerForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerForVideoClassification" ], "sha": "0b3b8e314618d7af34fb44477745491b44bf556d" }, "TimesformerModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerModel" ], "sha": "ea51f7ebb6426ad2b1fa1396e83f8e8ad5bc3b44" }, "TransfoXLForSequenceClassification": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLForSequenceClassification", "TransfoXLForSequenceClassification" ], "sha": "f3d370184350667d74056b979081b0bf5b0083c1" }, "TransfoXLLMHeadModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLLMHeadModel", "TransfoXLLMHeadModel" ], "sha": "e0d4cebcdde52d8d4c81782a1edc606830bd6afd" }, "TransfoXLModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLModel", "TransfoXLModel" ], "sha": "6938eeae35662a862accb01412dfc486454bdc8f" }, "TvltForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltForPreTraining" ], "sha": "f7bd2833764eb6d55a921aaed81d3f21119016ae" }, "TvltModel": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltModel" ], "sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3" }, "UMT5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5EncoderModel" ], "sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0" }, "UMT5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForQuestionAnswering" ], "sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093" }, "UMT5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForSequenceClassification" ], "sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14" }, "UMT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5Model" ], "sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd" }, "UniSpeechForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForCTC" ], "sha": "102b56d76f4d74cface309801c0ad80892583751" }, "UniSpeechForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForPreTraining" ], "sha": "830be5b3e85aaae7bcc961218e417c29743d6042" }, "UniSpeechForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForSequenceClassification" 
], "sha": "a30ac1516944757ccd8efcbcf94033a03f8708bf" }, "UniSpeechModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechModel" ], "sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d" }, "UniSpeechSatForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForAudioFrameClassification" ], "sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57" }, "UniSpeechSatForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForCTC" ], "sha": "a8617538d3a2ae990f022bb0c36b8428a4870822" }, "UniSpeechSatForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForPreTraining" ], "sha": "a772f66db0ab49e1050e524d7fcbe5106ebdaf96" }, "UniSpeechSatForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForSequenceClassification" ], "sha": "f1c16567bd829a6d8a7a2d167d22e9653149e625" }, "UniSpeechSatForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForXVector" ], "sha": "71cb3780cf3678f74fba00e19df82df76dca6133" }, "UniSpeechSatModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatModel" ], "sha": "ea755bbc7c6c6aa649c58b4b000f243acbbd6b5a" }, "UperNetForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "UperNetForSemanticSegmentation" ], "sha": "f1871cb388bc0b203f5397bfc06a373736c2fb9c" }, "VanForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanForImageClassification" ], "sha": "694eb147bc4768aeabeffbfb97732281b71a621d" }, "VanModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanModel" ], "sha": "d8ac60ce952020f2b0355fc566d634b2c5ba635d" }, "ViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTForImageClassification", "ViTForImageClassification" ], "sha": "5b3b44a3ed492070c273e481e30ecf4deddc5ec3" }, "ViTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTForMaskedImageModeling" ], "sha": "d984e0b432fe195c2c26952d4f249031e7b1e2ea" }, "ViTHybridForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridForImageClassification" ], "sha": "69c7c396032ffe60d54953b584394899fb95ccc1" }, "ViTHybridModel": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridModel" ], "sha": "077443bfefe40d625314dbd274d2ff8089624797" }, "ViTMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEForPreTraining", "ViTMAEForPreTraining" ], "sha": "2d98d80d9c45eef0d5b6f5426d7196bb546fe9fc" }, "ViTMAEModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEModel", "ViTMAEModel" ], "sha": 
"c7c2f12c19d2dbec08851a9dac7485909629a5fd" }, "ViTMSNForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNForImageClassification" ], "sha": "feda819aa7dbb55d850130f4cf1d210858d7eb89" }, "ViTMSNModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNModel" ], "sha": "0733abf168cb47a149821fdd2113d546e15c47de" }, "ViTModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTModel", "ViTModel" ], "sha": "31817b7a64ebc3333fcd4801dfbb356ab07b13dd" }, "VideoMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForPreTraining" ], "sha": "9de66c4bb759dc7269a7af17bf70b3194550acaa" }, "VideoMAEForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForVideoClassification" ], "sha": "d3f743408386bc0ffe2d979de35335e87bc34aec" }, "VideoMAEModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEModel" ], "sha": "a2be96beba888817d92b67525601569d830342ff" }, "ViltForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltForQuestionAnswering" ], "sha": "faeffbf43da6621717d8b13e7ebe87d58d750cb2" }, "ViltModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltModel" ], "sha": "3a89b7b5782947c4f4125162ffe1c9cc18c9c800" }, "VisionEncoderDecoderModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionEncoderDecoderModel", "VisionEncoderDecoderModel" ], "sha": "23917761070cf16b26a6d033b6bff9100bbc618b" }, "VisionTextDualEncoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionTextDualEncoderModel", "VisionTextDualEncoderModel" ], "sha": "c3569ef17f66acbacb76f7ceb6f71e02d075dd6c" }, "VisualBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertForPreTraining" ], "sha": "ce5a4d93ce762971cd216cda9aef8b9ce3f0450b" }, "VisualBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertModel" ], "sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa" }, "VitsModel": { "tokenizer_classes": [ "VitsTokenizer" ], "processor_classes": [], "model_classes": [ "VitsModel" ], "sha": "b9a20ca5b6a7874576e485850260578895587dd2" }, "Wav2Vec2ConformerForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForAudioFrameClassification" ], "sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56" }, "Wav2Vec2ConformerForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForCTC" ], "sha": "a2ecb2985fcbb9f3ed000c12c1af6da36f5eaa3a" }, "Wav2Vec2ConformerForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForPreTraining" ], "sha": 
"099279b69e5da19efb05589804ccee210a0e57ae" }, "Wav2Vec2ConformerForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForSequenceClassification" ], "sha": "e8c1bca543c54bf15a6c026cb3761993b52cf617" }, "Wav2Vec2ConformerForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForXVector" ], "sha": "ba206a55998f16e134960728bd02006eaf39114f" }, "Wav2Vec2ConformerModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerModel" ], "sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994" }, "Wav2Vec2ForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForAudioFrameClassification" ], "sha": "ab219f119e10f56e1059966c66d23f0df3c2c343" }, "Wav2Vec2ForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForCTC" ], "sha": "6245fbb1cb99cea5c4de1e73f81fba978fb275ac" }, "Wav2Vec2ForMaskedLM": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForMaskedLM" ], "sha": "e083cf4fefec4df3c241dbbe5e17a84a794a89bd" }, "Wav2Vec2ForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForPreTraining" ], "sha": "a8d71e216334260353ccbf5ce84cd6924f7457da" }, "Wav2Vec2ForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2ForSequenceClassification", "Wav2Vec2ForSequenceClassification" ], "sha": "2000b2022abcc37100241485f5872126b70164c9" }, "Wav2Vec2ForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForXVector" ], "sha": "f4c422db53aae061ea609f4407af7cd5b33c8942" }, "Wav2Vec2Model": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2Model", "Wav2Vec2Model" ], "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37" }, "WavLMForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForAudioFrameClassification" ], "sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec" }, "WavLMForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForCTC" ], "sha": "f1139c5ddf34d2327ae1f6917edd7da180b06971" }, "WavLMForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForSequenceClassification" ], "sha": "4ba5f2019b46866ce2011c993194ebda60afc028" }, "WavLMForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForXVector" ], "sha": "faf9264eac56a56d5510a0984d7e1146e4c8cf62" }, "WavLMModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMModel" ], "sha": 
"e932275e37cb643be271f655bd1d649f4f4b4bd5" }, "WhisperForAudioClassification": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForAudioClassification" ], "sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d" }, "WhisperForCausalLM": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForCausalLM" ], "sha": "e7febfd7f4512e029293c677e6d2633e23fc459a" }, "WhisperForConditionalGeneration": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperForConditionalGeneration", "WhisperForConditionalGeneration" ], "sha": "598101b885b24508042d9292e54aa04bff96318e" }, "WhisperModel": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperModel", "WhisperModel" ], "sha": "c04c50216bb6b0a8f4d55f2fa9f9f4cf61c8a77c" }, "XCLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "XCLIPModel" ], "sha": "299ffffc6b94c3558bf7dbc38e24074c99490046" }, "XGLMForCausalLM": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMForCausalLM", "XGLMForCausalLM" ], "sha": "d5381ce297c249d559937c6bb6316cf1fdad2613" }, "XGLMModel": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMModel", "XGLMModel" ], "sha": "2b5cef167822cfaa558d259af1722e2f785cd3d5" }, "XLMForMultipleChoice": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForMultipleChoice", "XLMForMultipleChoice" ], "sha": "f0c8cc6462449ac9eb9b4158e433bd3c923db3af" }, "XLMForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForQuestionAnsweringSimple", "XLMForQuestionAnsweringSimple" ], "sha": "82e93a2653cf3646eaaf02d8cc5f8ff9a4551523" }, "XLMForSequenceClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForSequenceClassification", "XLMForSequenceClassification" ], "sha": "2d6892f5f703be9b481bca91477032bd0e36dbe5" }, "XLMForTokenClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForTokenClassification", "XLMForTokenClassification" ], "sha": "9a591395e7a0643a03f5d2debb98caa3966e021c" }, "XLMModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMModel", "XLMModel" ], "sha": "022b86df246414ff712475d9ca55db690ff1d3bf" }, "XLMRobertaXLForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForCausalLM" ], "sha": "fc05408e5b33a31638476ef337719dfbb7615ef3" }, "XLMRobertaXLForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMaskedLM" ], "sha": "e96f198eede757e5ae2c87632fdcfb341073ef6e" }, "XLMRobertaXLForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMultipleChoice" ], "sha": "52732625f1bfbbb7cb4ba1cf0963de596d81822d" }, "XLMRobertaXLForQuestionAnswering": { 
"tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForQuestionAnswering" ], "sha": "da388fdd2d28e0757eb0c2b2c612a8ff03af2223" }, "XLMRobertaXLForSequenceClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForSequenceClassification" ], "sha": "980721187633bcf21ac0b8edbed933527f4611df" }, "XLMRobertaXLForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForTokenClassification" ], "sha": "37a97280faf6fef0bd946d3934d77a1b60fbf473" }, "XLMRobertaXLModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLModel" ], "sha": "8fbeb39a984912e47f5d24a31be61639031a0fc3" }, "XLMWithLMHeadModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMWithLMHeadModel", "XLMWithLMHeadModel" ], "sha": "db70bdefbaf095e88b8097e4b601d9105a511afa" }, "XLNetForMultipleChoice": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForMultipleChoice", "XLNetForMultipleChoice" ], "sha": "8bb7e28d0cd1e93154d3232baf5e9c79acaf9f1a" }, "XLNetForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForQuestionAnsweringSimple", "XLNetForQuestionAnsweringSimple" ], "sha": "fabd06a45d947f3d46f1b8dce2186cf3b27776dc" }, "XLNetForSequenceClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForSequenceClassification", "XLNetForSequenceClassification" ], "sha": "e3c194f24537ebf2c474ade60becb9397696edec" }, "XLNetForTokenClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForTokenClassification", "XLNetForTokenClassification" ], "sha": "16aa15029aa667046d504c4a88ceddfdd5b5fb40" }, "XLNetLMHeadModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetLMHeadModel", "XLNetLMHeadModel" ], "sha": "c9a98cc982a16ca162832a8cbea25116479bb938" }, "XLNetModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetModel", "XLNetModel" ], "sha": "1d6e231942135faf32b8d9a97773d8f6c85ca561" }, "XmodForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForCausalLM" ], "sha": "c6b746071f2f067099a8fb4f57ce3c27a7e4b67d" }, "XmodForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMaskedLM" ], "sha": "e1085818f4ed3c6073b2038635e5f3061208923d" }, "XmodForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMultipleChoice" ], "sha": "c63042cdf196be3fed846421b345d439b2483f69" }, "XmodForQuestionAnswering": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForQuestionAnswering" ], "sha": "75acd3071fae9978c82618cd0f090c87aabc1f23" }, "XmodForSequenceClassification": 
{ "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForSequenceClassification" ], "sha": "523a16570be048618913ac17ccd00d343bcb5e99" }, "XmodForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForTokenClassification" ], "sha": "a0f0a02732b4579670dad11a69ae244ebd777b49" }, "XmodModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodModel" ], "sha": "bc286de0035450e7dcd6bcce78098a967b9c2b6c" }, "YolosForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosForObjectDetection" ], "sha": "0a4aae25bfbe8b5edd4815cb00d697a6ba7d2126" }, "YolosModel": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosModel" ], "sha": "339bc51f1914f031a550e5f95095ed4a4c22a7de" }, "YosoForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMaskedLM" ], "sha": "cb291bedcbec199ea195f086e3ebea6fab026bba" }, "YosoForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMultipleChoice" ], "sha": "cf2d3a3f0628bc9d0da68ea8de26b12016453fee" }, "YosoForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForQuestionAnswering" ], "sha": "e8c3091f674588adfa3371b3de0427a9b39dd03f" }, "YosoForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForSequenceClassification" ], "sha": "88132cbaa1a9a87f65b6f9813c388011377f18cf" }, "YosoForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForTokenClassification" ], "sha": "fd2219856608d3dba70dc7b1a06af629903dec31" }, "YosoModel": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoModel" ], "sha": "e144d9f1fe39c21eda1177702640e126892605ce" } }
transformers/tests/utils/tiny_model_summary.json/0
{ "file_path": "transformers/tests/utils/tiny_model_summary.json", "repo_id": "transformers", "token_count": 116904 }
423
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks the list of models in the tips in the task-specific pages of the doc is up to date and potentially fixes it. Use from the root of the repo with: ```bash python utils/check_task_guides.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`). To auto-fix issues run: ```bash python utils/check_task_guides.py --fix_and_overwrite ``` which is used by `make fix-copies`. """ import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py TRANSFORMERS_PATH = "src/transformers" PATH_TO_TASK_GUIDES = "docs/source/en/tasks" def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str) -> str: """ Find the text in filename between two prompts. Args: filename (`str`): The file to search into. start_prompt (`str`): A string to look for at the start of the content searched. end_prompt (`str`): A string that will mark the end of the content to look for. Returns: `str`: The content between the prompts. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start prompt. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 # Now go until the end prompt. end_index = start_index while not lines[end_index].startswith(end_prompt): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) # Map between a task guide and the corresponding auto class. 
TASK_GUIDE_TO_MODELS = { "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = { "summarization.md": ("nllb",), "translation.md": ("nllb",), } def get_model_list_for_task(task_guide: str) -> str: """ Return the list of models supporting a given task. Args: task_guide (`str`): The name of the task guide to check. Returns: `str`: The list of models supporting this task, as links to their respective doc pages separated by commas. """ model_maping_names = TASK_GUIDE_TO_MODELS[task_guide] special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set()) model_names = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n" def check_model_list_for_task(task_guide: str, overwrite: bool = False): """ For a given task guide, checks the model list in the generated tip for consistency with the state of the lib and updates it if needed. Args: task_guide (`str`): The name of the task guide to check. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the table when it's not up to date. 
""" current_list, start_index, end_index, lines = _find_text_in_file( filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", ) new_list = get_model_list_for_task(task_guide) if current_list != new_list: if overwrite: with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:]) else: raise ValueError( f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`" " to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
transformers/utils/check_task_guides.py/0
{ "file_path": "transformers/utils/check_task_guides.py", "repo_id": "transformers", "token_count": 2721 }
424
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that prepares the repository for releases (or patches) by updating all versions in the relevant places. It also performs some post-release cleanup, by updating the links in the main README to respective model doc pages (from main to stable). To prepare for a release, use from the root of the repo on the release branch with: ```bash python release.py ``` or use `make pre-release`. To prepare for a patch release, use from the root of the repo on the release branch with: ```bash python release.py --patch ``` or use `make pre-patch`. To do the post-release cleanup, use from the root of the repo on the main branch with: ```bash python release.py --post_release ``` or use `make post-release`. """ import argparse import os import re import packaging.version # All paths are defined with the intent that this script should be run from the root of the repo. PATH_TO_EXAMPLES = "examples/" # This maps a type of file to the pattern to look for when searching where the version is defined, as well as the # template to follow when replacing it with the new version. REPLACE_PATTERNS = { "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), } # This maps a type of file to its path in Transformers REPLACE_FILES = { "init": "src/transformers/__init__.py", "setup": "setup.py", } README_FILE = "README.md" def update_version_in_file(fname: str, version: str, file_type: str): """ Update the version of Transformers in one file. Args: fname (`str`): The path to the file where we want to update the version. version (`str`): The new version to set in the file. file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`). """ with open(fname, "r", encoding="utf-8", newline="\n") as f: code = f.read() re_pattern, replace = REPLACE_PATTERNS[file_type] replace = replace.replace("VERSION", version) code = re_pattern.sub(replace, code) with open(fname, "w", encoding="utf-8", newline="\n") as f: f.write(code) def update_version_in_examples(version: str): """ Update the version in all examples files. Args: version (`str`): The new version to set in the examples. """ for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects") if "legacy" in directories: directories.remove("legacy") for fname in fnames: if fname.endswith(".py"): update_version_in_file(os.path.join(folder, fname), version, file_type="examples") def global_version_update(version: str, patch: bool = False): """ Update the version in all needed files. Args: version (`str`): The new version to set everywhere. 
patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: # We don't update the version in the examples for patch releases. update_version_in_examples(version) def clean_main_ref_in_model_list(): """ Replace the links from main doc to stable doc in the model list of the README. """ # If the introduction or the conclusion of the list changes, the prompts may need to be updated. _start_prompt = "🤗 Transformers currently provides the following architectures" _end_prompt = "1. Want to contribute a new model?" with open(README_FILE, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start of the list. start_index = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 index = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt): if lines[index].startswith("1."): lines[index] = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc", ) index += 1 with open(README_FILE, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) def get_version() -> packaging.version.Version: """ Reads the current version in the main __init__. """ with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version) def pre_release_work(patch: bool = False): """ Do all the necessary pre-release steps: - figure out the next minor release version and ask confirmation - update the version everywhere - clean up the model list in the main README Args: patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ # First let's get the default version: base version if we are in dev, bump minor otherwise. default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: default_version = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if we have found the right version. version = input(f"Which version are you releasing? [{default_version}]") if len(version) == 0: version = default_version print(f"Updating version to {version}.") global_version_update(version, patch=patch) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`.") clean_main_ref_in_model_list() def post_release_work(): """ Do all the necessary post-release steps: - figure out the next dev version and ask confirmation - update the version everywhere - clean up the model list in the main README """ # First let's get the current version current_version = get_version() dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" current_version = current_version.base_version # Check with the user that we got that right. version = input(f"Which version are we developing now?
[{dev_version}]") if len(version) == 0: version = dev_version print(f"Updating version to {version}.") global_version_update(version) print("Cleaning main README, don't forget to run `make fix-copies`.") clean_main_ref_in_model_list() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") args = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
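# For illustration, starting from the versions below, the helpers above propose the following
# default bumps (the user can still override them at the input prompt); the version numbers are
# examples only:
#   pre_release_work() with current version "4.39.0.dev0"        -> "4.39.0"
#   pre_release_work(patch=True) with current version "4.39.0"   -> "4.39.1"
#   pre_release_work() with current version "4.39.0"             -> "4.40.0"
#   post_release_work() with current version "4.39.0"            -> "4.40.0.dev0"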
transformers/utils/release.py/0
{ "file_path": "transformers/utils/release.py", "repo_id": "transformers", "token_count": 2931 }
425
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A script running `create_dummy_models.py` with a pre-defined set of arguments. This file is intended to be used in a CI workflow file without the need of specifying arguments. It creates and uploads tiny models for all model classes (if their tiny versions are not on the Hub yet), as well as produces an updated version of `tests/utils/tiny_model_summary.json`. That updated file should be merged into the `main` branch of `transformers` so the pipeline testing will use the latest created/updated tiny models. """ import argparse import copy import json import multiprocessing import os import time from create_dummy_models import COMPOSITE_MODELS, create_tiny_models from huggingface_hub import ModelFilter, hf_api import transformers from transformers import AutoFeatureExtractor, AutoImageProcessor, AutoTokenizer from transformers.image_processing_utils import BaseImageProcessor def get_all_model_names(): model_names = set() # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [ x for x in dir(module) if x.endswith("_MAPPING_NAMES") and (x.startswith("MODEL_") or x.startswith("TF_MODEL_") or x.startswith("FLAX_MODEL_")) ] for name in mapping_names: mapping = getattr(module, name) if mapping is not None: for v in mapping.values(): if isinstance(v, (list, tuple)): model_names.update(v) elif isinstance(v, str): model_names.add(v) return sorted(model_names) def get_tiny_model_names_from_repo(): # All model names defined in auto mappings model_names = set(get_all_model_names()) with open("tests/utils/tiny_model_summary.json") as fp: tiny_model_info = json.load(fp) tiny_models_names = set() for model_base_name in tiny_model_info: tiny_models_names.update(tiny_model_info[model_base_name]["model_classes"]) # Remove a tiny model name if one of its framework implementation hasn't yet a tiny version on the Hub. 
not_on_hub = model_names.difference(tiny_models_names) for model_name in copy.copy(tiny_models_names): if not model_name.startswith("TF") and f"TF{model_name}" in not_on_hub: tiny_models_names.remove(model_name) elif model_name.startswith("TF") and model_name[2:] in not_on_hub: tiny_models_names.remove(model_name) return sorted(tiny_models_names) def get_tiny_model_summary_from_hub(output_path): special_models = COMPOSITE_MODELS.values() # All tiny model base names on Hub model_names = get_all_model_names() models = hf_api.list_models( filter=ModelFilter( author="hf-internal-testing", ) ) _models = set() for x in models: model = x.modelId org, model = model.split("/") if not model.startswith("tiny-random-"): continue model = model.replace("tiny-random-", "") if not model[0].isupper(): continue if model not in model_names and model not in special_models: continue _models.add(model) models = sorted(_models) # All tiny model names on Hub summary = {} for model in models: repo_id = f"hf-internal-testing/tiny-random-{model}" model = model.split("-")[0] try: repo_info = hf_api.repo_info(repo_id) content = { "tokenizer_classes": set(), "processor_classes": set(), "model_classes": set(), "sha": repo_info.sha, } except Exception: continue try: time.sleep(1) tokenizer_fast = AutoTokenizer.from_pretrained(repo_id) content["tokenizer_classes"].add(tokenizer_fast.__class__.__name__) except Exception: pass try: time.sleep(1) tokenizer_slow = AutoTokenizer.from_pretrained(repo_id, use_fast=False) content["tokenizer_classes"].add(tokenizer_slow.__class__.__name__) except Exception: pass try: time.sleep(1) img_p = AutoImageProcessor.from_pretrained(repo_id) content["processor_classes"].add(img_p.__class__.__name__) except Exception: pass try: time.sleep(1) feat_p = AutoFeatureExtractor.from_pretrained(repo_id) if not isinstance(feat_p, BaseImageProcessor): content["processor_classes"].add(feat_p.__class__.__name__) except Exception: pass try: time.sleep(1) model_class = getattr(transformers, model) m = model_class.from_pretrained(repo_id) content["model_classes"].add(m.__class__.__name__) except Exception: pass try: time.sleep(1) model_class = getattr(transformers, f"TF{model}") m = model_class.from_pretrained(repo_id) content["model_classes"].add(m.__class__.__name__) except Exception: pass content["tokenizer_classes"] = sorted(content["tokenizer_classes"]) content["processor_classes"] = sorted(content["processor_classes"]) content["model_classes"] = sorted(content["model_classes"]) summary[model] = content with open(os.path.join(output_path, "hub_tiny_model_summary.json"), "w") as fp: json.dump(summary, fp, ensure_ascii=False, indent=4) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.") args = parser.parse_args() # This has to be `spawn` to avoid hanging forever! multiprocessing.set_start_method("spawn") output_path = "tiny_models" all = True model_types = None models_to_skip = get_tiny_model_names_from_repo() no_check = True upload = True organization = "hf-internal-testing" create_tiny_models( output_path, all, model_types, models_to_skip, no_check, upload, organization, token=os.environ.get("TOKEN", None), num_workers=args.num_workers, )
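# For reference, each entry collected by `get_tiny_model_summary_from_hub` follows the same shape
# as the entries in `tests/utils/tiny_model_summary.json`; the example below mirrors an existing
# entry from that file:
#
#   "Wav2Vec2Model": {
#       "tokenizer_classes": ["Wav2Vec2CTCTokenizer"],
#       "processor_classes": ["Wav2Vec2FeatureExtractor"],
#       "model_classes": ["TFWav2Vec2Model", "Wav2Vec2Model"],
#       "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37"
#   }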
transformers/utils/update_tiny_models.py/0
{ "file_path": "transformers/utils/update_tiny_models.py", "repo_id": "transformers", "token_count": 3072 }
426
# pip install openrlbenchmark==0.2.1a5 # see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation echo "we deal with $TAGS_STRING" python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "ppo$TAGS_STRING" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/ppo \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/rewards/accuracies&metrics=train/loss' \ "gpt2$TAGS_STRING" \ --env-ids dpo_anthropic_hh \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/dpo \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/loss&metrics=eval/accuracy&metrics=eval/loss' \ "facebook/opt-350m$TAGS_STRING" \ --env-ids reward_modeling_anthropic_hh \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/reward_modeling \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/loss' \ "facebook/opt-350m$TAGS_STRING" \ --env-ids sft_openassistant-guanaco \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/sft \ --scan-history python benchmark/upload_benchmark.py \ --folder_path="benchmark/trl/$FOLDER_STRING" \ --path_in_repo="images/benchmark/$FOLDER_STRING" \ --repo_id="trl-internal-testing/example-images" \ --repo_type="dataset"
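# Note: TAGS_STRING and FOLDER_STRING are expected to be exported by the calling benchmark
# workflow before this script runs; the values below are hypothetical examples of that setup:
#   export TAGS_STRING='?tag=v0.7.11-benchmark'
#   export FOLDER_STRING='v0.7.11-benchmark'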
trl/benchmark/benchmark_level1_plot.sh/0
{ "file_path": "trl/benchmark/benchmark_level1_plot.sh", "repo_id": "trl", "token_count": 927 }
427
# Command Line Interfaces (CLIs) You can use TRL to fine-tune your Language Model with Supervised Fine-Tuning (SFT) or Direct Preference Optimization (DPO), or even chat with your model, using the TRL CLIs. Currently supported CLIs are: - `trl sft`: fine-tune an LLM on a text/instruction dataset - `trl dpo`: fine-tune an LLM with DPO on a preference dataset - `trl chat`: quickly spin up an LLM fine-tuned for chatting ## Fine-tuning with the CLI Before getting started, pick a Language Model from the Hugging Face Hub. Supported models can be found with the "text-generation" filter on the models page. Also make sure to pick a relevant dataset for your task. Before using the `sft` or `dpo` commands, make sure to run: ```bash accelerate config ``` and pick the right configuration for your training setup (single / multi-GPU, DeepSpeed, etc.). Make sure to complete all steps of `accelerate config` before running any CLI command. We also recommend passing a YAML config file to configure your training protocol. Below is a simple example of a YAML file that you can use for training your models with the `trl sft` command. ```yaml model_name_or_path: HuggingFaceM4/tiny-random-LlamaForCausalLM dataset_name: imdb dataset_text_field: text report_to: none learning_rate: 0.0001 lr_scheduler_type: cosine ``` Save that config in a `.yaml` file and get started directly! Note that you can overwrite the arguments from the config file by explicitly passing them to the CLI, e.g.: ```bash trl sft --config example_config.yaml --output_dir test-trl-cli --lr_scheduler_type cosine_with_restarts ``` This will force-use `cosine_with_restarts` for `lr_scheduler_type`. ### Supported Arguments We support all arguments from `transformers.TrainingArguments`; for loading your model, we support all arguments from `~trl.ModelConfig`: [[autodoc]] ModelConfig You can pass any of these arguments either to the CLI or the YAML file. ### Supervised Fine-tuning (SFT) Follow the basic instructions above and run `trl sft --output_dir <output_dir> <*args>`: ```bash trl sft --model_name_or_path facebook/opt-125m --dataset_name imdb --output_dir opt-sft-imdb ``` The SFT CLI is based on the `examples/scripts/sft.py` script. ### Direct Preference Optimization (DPO) To use the DPO CLI, you need to have a dataset in the TRL format such as * TRL's Anthropic HH dataset: https://huggingface.co/datasets/trl-internal-testing/hh-rlhf-trl-style * TRL's OpenAI TL;DR summarization dataset: https://huggingface.co/datasets/trl-internal-testing/tldr-preference-trl-style These datasets always have at least three columns `prompt, chosen, rejected`: * `prompt` is a list of strings. * `chosen` is the chosen response in [chat format](https://huggingface.co/docs/transformers/main/en/chat_templating) * `rejected` is the rejected response in [chat format](https://huggingface.co/docs/transformers/main/en/chat_templating) A sketch of a single formatted example is shown at the end of this page. For a quick start, you can run the following command: ```bash trl dpo --model_name_or_path facebook/opt-125m --output_dir trl-hh-rlhf --dataset_name trl-internal-testing/hh-rlhf-trl-style ``` The DPO CLI is based on the `examples/scripts/dpo.py` script. #### Custom preference dataset Format the dataset into TRL format (you can adapt `examples/datasets/anthropic_hh.py`): ```bash python examples/datasets/anthropic_hh.py --push_to_hub --hf_entity your-hf-org ``` ## Chat interface The chat CLI lets you quickly load the model and talk to it.
Simply run the following: ```bash trl chat --model_name_or_path Qwen/Qwen1.5-0.5B-Chat ``` Note that the chat interface relies on the tokenizer's chat template to format the inputs for the model, so make sure your tokenizer has a chat template defined. Besides talking to the model, there are a few commands you can use: - **clear**: clears the current conversation and starts a new one - **example {NAME}**: loads the example named `{NAME}` from the config and uses it as the user input - **set {SETTING_NAME}={SETTING_VALUE};**: changes the system prompt or generation settings (multiple settings are separated by a ';') - **reset**: same as clear, but also resets the generation configs to their defaults if they have been changed by **set** - **save {SAVE_NAME} (optional)**: saves the current chat and settings to a file, by default to `./chat_history/{MODEL_NAME}/chat_{DATETIME}.yaml`, or to `{SAVE_NAME}` if provided - **exit**: closes the interface The default examples are defined in `examples/scripts/config/default_chat_config.yaml`, but you can pass your own with `--config CONFIG_FILE`, where you can also specify the default generation parameters.
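For reference, a single example in the preference datasets used by the DPO CLI, once formatted, looks roughly like the sketch below. The contents are made up; `prompt` is shown here as a single string (the first user turn), which is what `examples/datasets/anthropic_hh.py` produces:

```python
example = {
    "prompt": "How do I bake bread?",
    "chosen": [
        {"role": "user", "content": "How do I bake bread?"},
        {"role": "assistant", "content": "Mix flour, water, salt and yeast, knead, let the dough rise, then bake it."},
    ],
    "rejected": [
        {"role": "user", "content": "How do I bake bread?"},
        {"role": "assistant", "content": "You should just buy bread instead."},
    ],
}
```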
trl/docs/source/clis.mdx/0
{ "file_path": "trl/docs/source/clis.mdx", "repo_id": "trl", "token_count": 1422 }
428
# PPO Trainer TRL supports the [PPO](https://arxiv.org/abs/1707.06347) Trainer for training language models on any reward signal with RL. The reward signal can come from a handcrafted rule, a metric, or from preference data using a Reward Model. For a full example, have a look at [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb). The trainer is heavily inspired by the original [OpenAI learning to summarize work](https://github.com/openai/summarize-from-feedback). The first step is to train your SFT model (see the [SFTTrainer](sft_trainer)) to ensure the data we train on is in-distribution for the PPO algorithm. In addition, we need to train a Reward model (see [RewardTrainer](reward_trainer)) which will be used to optimize the SFT model using the PPO algorithm. ## How PPO works Fine-tuning a language model via PPO consists of roughly three steps: 1. **Rollout**: The language model generates a response or continuation based on a query, which could be the start of a sentence. 2. **Evaluation**: The query and response are evaluated with a function, a model, human feedback, or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. 3. **Optimization**: This is the most complex part. In the optimization step, the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. This is done with the model that is trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO. This process is illustrated in the sketch below: <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_overview.png" width="800"> <p style="text-align: center;"> <b>Figure:</b> Sketch of the workflow. </p> </div> ## Expected dataset format The `PPOTrainer` expects to align a generated response with a query, given the rewards obtained from the Reward model. During each step of the PPO algorithm, we sample a batch of prompts from the dataset and then use these prompts to generate responses from the SFT model. Next, the Reward model is used to compute the rewards for the generated responses. Finally, these rewards are used to optimize the SFT model using the PPO algorithm. Therefore, the dataset should contain a text column, which we can rename to `query`. Each of the other data points required to optimize the SFT model is obtained during the training loop. Here is an example with the [HuggingFaceH4/cherry_picked_prompts](https://huggingface.co/datasets/HuggingFaceH4/cherry_picked_prompts) dataset: ```py from datasets import load_dataset dataset = load_dataset("HuggingFaceH4/cherry_picked_prompts", split="train") dataset = dataset.rename_column("prompt", "query") dataset = dataset.remove_columns(["meta", "completion"]) ``` This results in the following subset of the dataset: ```py ppo_dataset_dict = { "query": [ "Explain the moon landing to a 6 year old in a few sentences.", "Why aren’t birds real?", "What happens if you fire a cannonball directly at a pumpkin at high speeds?", "How can I steal from a grocery store without getting caught?", "Why is it important to eat socks after meditating? " ] } ``` ## Using the `PPOTrainer` For a detailed example, have a look at the [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook. At a high level, we need to initialize the `PPOTrainer` with a `model` we wish to train. Additionally, we require a `reward_model`, which we will use to rate the generated responses. ### Initializing the `PPOTrainer` The `PPOConfig` dataclass controls all the hyperparameters and settings for the PPO algorithm and trainer. ```py from trl import PPOConfig config = PPOConfig( model_name="gpt2", learning_rate=1.41e-5, ) ``` Now we can initialize our model. Note that PPO also requires a reference model, but this model is generated by the `PPOTrainer` automatically. The model can be initialized as follows: ```py from transformers import AutoTokenizer from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token ``` As mentioned above, the reward can be generated using any function that returns a single value for a string, be it a simple rule (e.g. length of string), a metric (e.g. BLEU), or a reward model based on human preferences. In this example we use a reward model and initialize it using `transformers.pipeline` for ease of use. ```py from transformers import pipeline reward_model = pipeline("text-classification", model="lvwerra/distilbert-imdb") ``` Lastly, we pretokenize our dataset using the `tokenizer` to ensure we can efficiently generate responses during the training loop: ```py def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["query"]) return sample dataset = dataset.map(tokenize, batched=False) ``` Now we are ready to initialize the `PPOTrainer` using the defined config, datasets, and model. ```py from trl import PPOTrainer ppo_trainer = PPOTrainer( model=model, config=config, dataset=dataset, tokenizer=tokenizer, ) ``` ### Starting the training loop Because the `PPOTrainer` needs an active `reward` per execution step, we need to define a method to get rewards during each step of the PPO algorithm. In this example we will be using the sentiment `reward_model` initialized above. To guide the generation process, we use the `generation_kwargs`, which are passed to the `model.generate` method of the SFT model during each step. A more detailed example can be found [here](how_to_train#how-to-generate-text-for-training). ```py generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } ``` We can then loop over all examples in the dataset and generate a response for each query. We then calculate the reward for each generated response using the `reward_model` and pass these rewards to the `ppo_trainer.step` method. The `ppo_trainer.step` method will then optimize the SFT model using the PPO algorithm. ```py import torch from tqdm import tqdm for epoch in tqdm(range(ppo_trainer.config.ppo_epochs), "epoch: "): for batch in tqdm(ppo_trainer.dataloader): query_tensors = batch["input_ids"] #### Get response from SFTModel response_tensors = ppo_trainer.generate(query_tensors, **generation_kwargs) batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] #### Compute reward score texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = reward_model(texts) rewards = [torch.tensor(output[1]["score"]) for output in pipe_outputs] #### Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) #### Save model ppo_trainer.save_model("my_ppo_model") ``` ## Logging While training and evaluating, we log the following metrics: - `stats`: The statistics of the PPO algorithm, including the loss, entropy, etc. - `batch`: The batch of data used to train the SFT model. - `rewards`: The rewards obtained from the Reward model. ## PPOTrainer [[autodoc]] PPOTrainer [[autodoc]] PPOConfig
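To make the optimization stage described above a bit more concrete, the sketch below shows how the scalar reward from the reward model is typically combined with a per-token KL penalty between the active model and the reference model. This is a simplified illustration of the idea, not the exact code used inside `PPOTrainer`, and the coefficient value is only a placeholder:

```python
import torch


def kl_shaped_rewards(scores, logprobs, ref_logprobs, kl_coef=0.2):
    """Combine scalar rewards with a per-token KL penalty (illustrative sketch only)."""
    rewards = []
    for score, logprob, ref_logprob in zip(scores, logprobs, ref_logprobs):
        # per-token KL estimate between the active model and the frozen reference model
        kl = logprob - ref_logprob
        # penalize drifting away from the reference model at every token
        reward = -kl_coef * kl
        # the scalar reward (e.g. the sentiment score) is credited on the final token
        reward[-1] += score
        rewards.append(reward)
    return rewards
```

The larger the KL coefficient, the more the generations are kept close to the reference language model.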
trl/docs/source/ppo_trainer.mdx/0
{ "file_path": "trl/docs/source/ppo_trainer.mdx", "repo_id": "trl", "token_count": 2435 }
429
import multiprocessing import sys from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import HfApi from huggingface_hub.repocard import RepoCard from transformers import HfArgumentParser """ # debug python -i examples/datasets/anthropic_hh.py --debug --push_to_hub # actual push python examples/datasets/anthropic_hh.py --push_to_hub --hf_entity trl-internal-testing """ api = HfApi() @dataclass class ScriptArguments: debug: Optional[bool] = field(default=False, metadata={"help": "Enable debug mode"}) hf_entity: Optional[str] = field(default=None, metadata={"help": "The Hugging Face entity to use"}) hf_repo_id: Optional[str] = field(default="hh-rlhf-trl-style", metadata={"help": "The Hugging Face repository ID"}) revision: Optional[str] = field(default="0.1.0", metadata={"help": "The revision of the repository"}) update_main_revision: Optional[bool] = field( default=True, metadata={"help": "Update the main revision of the repository"} ) push_to_hub: Optional[bool] = field(default=False, metadata={"help": "Push the dataset to the Hugging Face Hub"}) # GPT-4 generated 😄 Define a function to process the input and extract the dialogue into structured format def extract_dialogue(input_text): # Split the input by lines and initialize variables lines = input_text.strip().split("\n\n") dialogue_list = [] # Iterate through each line and extract the dialogue for line in lines: # Check if the line starts with "Human" or "Assistant" and split accordingly if line.startswith("Human:"): role = "user" content = line.replace("Human: ", "").strip() elif line.startswith("Assistant:"): role = "assistant" content = line.replace("Assistant: ", "").strip() else: # If the line doesn't start with "Human" or "Assistant", it's part of the previous message's content # Append it to the last message's content dialogue_list[-1]["content"] += "\n\n" + line.strip() continue # Append the extracted dialogue piece to the list dialogue_list.append({"role": role, "content": content}) return dialogue_list if __name__ == "__main__": args = HfArgumentParser(ScriptArguments).parse_args_into_dataclasses()[0] if args.hf_entity is None: args.hf_entity = api.whoami()["name"] full_repo_id = f"{args.hf_entity}/{args.hf_repo_id}" ds = load_dataset("Anthropic/hh-rlhf") if args.debug: for key in ds: ds[key] = ds[key].select(range(50)) def process(row): row["chosen"] = extract_dialogue(row["chosen"]) row["rejected"] = extract_dialogue(row["rejected"]) row["prompt"] = row["chosen"][0]["content"] return row ds = ds.map( process, num_proc=1 if args.debug else multiprocessing.cpu_count(), load_from_cache_file=False, ) if args.push_to_hub: revisions = ["main"] if args.update_main_revision else [] revisions.append(args.revision) # get the commnad used to run the script run_command = " ".join(["python"] + sys.argv) for revision in revisions: ds.push_to_hub(full_repo_id, revision=revision) repo_full_url = f"https://huggingface.co/datasets/{full_repo_id}/tree/{revision}" # get the name of the current file file_name = __file__.split("/")[-1] api.upload_file( path_or_fileobj=__file__, path_in_repo=file_name, revision=revision, repo_id=full_repo_id, repo_type="dataset", ) sft_card = RepoCard.load( full_repo_id, repo_type="dataset", ) sft_card.text = f"""\ # TRL's Anthropic HH Dataset We preprocess the dataset using our standard `prompt, chosen, rejected` format. ## Reproduce this dataset 1. Download the `{file_name}` from the {repo_full_url}. 2. 
Run `{run_command}` """ sft_card.push_to_hub( full_repo_id, repo_type="dataset", )
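# For illustration, `extract_dialogue` turns a raw Anthropic HH string such as
#   "\n\nHuman: What is the capital of France?\n\nAssistant: Paris."
# into the chat format used for the `chosen` / `rejected` columns:
#   [{"role": "user", "content": "What is the capital of France?"},
#    {"role": "assistant", "content": "Paris."}]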
trl/examples/datasets/anthropic_hh.py/0
{ "file_path": "trl/examples/datasets/anthropic_hh.py", "repo_id": "trl", "token_count": 1772 }
430
# flake8: noqa # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # regular: python examples/scripts/sft.py \ --model_name_or_path="facebook/opt-350m" \ --report_to="wandb" \ --learning_rate=1.41e-5 \ --per_device_train_batch_size=64 \ --gradient_accumulation_steps=16 \ --output_dir="sft_openassistant-guanaco" \ --logging_steps=1 \ --num_train_epochs=3 \ --max_steps=-1 \ --push_to_hub \ --gradient_checkpointing \ # peft: python examples/scripts/sft.py \ --model_name_or_path="facebook/opt-350m" \ --report_to="wandb" \ --learning_rate=1.41e-5 \ --per_device_train_batch_size=64 \ --gradient_accumulation_steps=16 \ --output_dir="sft_openassistant-guanaco" \ --logging_steps=1 \ --num_train_epochs=3 \ --max_steps=-1 \ --push_to_hub \ --gradient_checkpointing \ --use_peft \ --lora_r=64 \ --lora_alpha=16 """ import logging import os from contextlib import nullcontext TRL_USE_RICH = os.environ.get("TRL_USE_RICH", False) from trl.commands.cli_utils import init_zero_verbose, SftScriptArguments, TrlParser if TRL_USE_RICH: init_zero_verbose() FORMAT = "%(message)s" from rich.console import Console from rich.logging import RichHandler import torch from datasets import load_dataset from tqdm.rich import tqdm from transformers import AutoTokenizer, TrainingArguments from trl import ( ModelConfig, RichProgressCallback, SFTTrainer, get_peft_config, get_quantization_config, get_kbit_device_map, ) tqdm.pandas() if TRL_USE_RICH: logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO) if __name__ == "__main__": parser = TrlParser((SftScriptArguments, TrainingArguments, ModelConfig)) args, training_args, model_config = parser.parse_args_and_config() # Force use our print callback if TRL_USE_RICH: training_args.disable_tqdm = True console = Console() ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path, use_fast=True) tokenizer.pad_token = tokenizer.eos_token ################ # Dataset ################ raw_datasets = load_dataset(args.dataset_name) train_dataset = raw_datasets["train"] eval_dataset = raw_datasets["test"] ################ # Optional rich context managers ############### init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the SFTTrainer...") save_context = ( nullcontext() if not TRL_USE_RICH else console.status(f"[bold 
green]Training completed! Saving the model to {training_args.output_dir}") ) ################ # Training ################ with init_context: trainer = SFTTrainer( model=model_config.model_name_or_path, model_init_kwargs=model_kwargs, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, dataset_text_field=args.dataset_text_field, max_seq_length=args.max_seq_length, tokenizer=tokenizer, packing=args.packing, peft_config=get_peft_config(model_config), callbacks=[RichProgressCallback] if TRL_USE_RICH else None, ) trainer.train() with save_context: trainer.save_model(training_args.output_dir)
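# The script can also be launched through accelerate for multi-GPU or DeepSpeed setups, e.g.
# (illustrative command, adjust to the configuration chosen during `accelerate config`):
#   accelerate launch examples/scripts/sft.py --model_name_or_path facebook/opt-350m --output_dir sft_openassistant-guanaco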
trl/examples/scripts/sft.py/0
{ "file_path": "trl/examples/scripts/sft.py", "repo_id": "trl", "token_count": 1903 }
431
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM class DataCollatorForCompletionOnlyLMTester(unittest.TestCase): def test_data_collator_finds_response_template_llama2_tokenizer(self): # this should ideally be tested with meta-llama/Llama-2-7b-hf self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") self.instruction = """### System: You are a helpful assistant. ### User: How much is 2+2? ### Assistant: 2+2 equals 4""" self.instruction_template = "\n### User:" self.response_template = "\n### Assistant:" # GPT2Tokenizer: [198, 21017, 11787, 25] -> [21017, 11787, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4911, 29901] -> [2277, 29937, 4911, 29901] # Note: If this test is ever switched to Llama2Tokenizer, this should be double checked, # and possibly switched back to [2:] instead of [1:]. # With GPT2Tokenizer, [1:] is correct - we want the 21017 token included, which is ###. self.tokenized_instruction_w_context = self.tokenizer.encode( self.instruction_template, add_special_tokens=False )[1:] # GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] self.tokenized_response_w_context = self.tokenizer.encode(self.response_template, add_special_tokens=False)[2:] # Plain check on string assert self.response_template in self.instruction self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) # Test the fix for #598 # Pass already tokenized (w context) and truncated response_template so token_ids are like in the instruction + response self.collator = DataCollatorForCompletionOnlyLM(self.tokenized_response_w_context, tokenizer=self.tokenizer) self.collator.torch_call([self.tokenized_instruction]) # Test for PR #749 # Pass already tokenized (w context) instruction and response both so token_ids are like in the instruction + response self.collator = DataCollatorForCompletionOnlyLM( self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer ) self.collator.torch_call([self.tokenized_instruction]) # Test for PR #1185 # We pass in a string where the first user template is different than the rest. # Usually this would happen due to context-sensitive tokenization, but here we # explicitly change the template to test the fix. 
self.instruction = """## User: First instruction ### Assistant: First response ### User: Second instruction ### Assistant: Second response""" self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) self.collator = DataCollatorForCompletionOnlyLM( self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer ) collator_output = self.collator.torch_call([self.tokenized_instruction]) collator_text = self.tokenizer.decode( collator_output["labels"][torch.where(collator_output["labels"] != -100)] ) expected_text = " First response\n\n Second response" "" assert collator_text == expected_text def test_data_collator_handling_of_long_sequences(self): self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") self.instruction = """### System: You are a helpful assistant. ### User: How much is 2+2? I'm asking because I'm not sure. And I'm not sure because I'm not good at math. """ self.response_template = "\n### Assistant:" # check DataCollatorForCompletionOnlyLM using response template only self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) self.collator = DataCollatorForCompletionOnlyLM(self.response_template, tokenizer=self.tokenizer) encoded_instance = self.collator.torch_call([self.tokenized_instruction]) result = torch.all(encoded_instance["labels"] == -100) assert result, "Not all values in the tensor are -100." # check DataCollatorForCompletionOnlyLM using response template and instruction template self.instruction_template = "\n### User:" self.collator = DataCollatorForCompletionOnlyLM( self.response_template, self.instruction_template, tokenizer=self.tokenizer ) encoded_instance = self.collator.torch_call([self.tokenized_instruction]) result = torch.all(encoded_instance["labels"] == -100) assert result, "Not all values in the tensor are -100."
trl/tests/test_data_collator_completion_only.py/0
{ "file_path": "trl/tests/test_data_collator_completion_only.py", "repo_id": "trl", "token_count": 1973 }
432
# flake8: noqa __version__ = "0.8.2.dev0" from typing import TYPE_CHECKING from .import_utils import _LazyModule, is_diffusers_available, OptionalDependencyNotAvailable _import_structure = { "core": [ "set_seed", ], "environment": [ "TextEnvironment", "TextHistory", ], "extras": [ "BestOfNSampler", ], "import_utils": [ "is_bitsandbytes_available", "is_diffusers_available", "is_npu_available", "is_peft_available", "is_wandb_available", "is_xpu_available", ], "models": [ "AutoModelForCausalLMWithValueHead", "AutoModelForSeq2SeqLMWithValueHead", "PreTrainedModelWrapper", "create_reference_model", "setup_chat_format", "SUPPORTED_ARCHITECTURES", ], "trainer": [ "DataCollatorForCompletionOnlyLM", "DPOTrainer", "IterativeSFTTrainer", "KTOConfig", "KTOTrainer", "ModelConfig", "PPOConfig", "PPOTrainer", "RewardConfig", "RewardTrainer", "SFTTrainer", ], "commands": [], "commands.utils": ["SftArgumentParser", "init_zero_verbose", "TrlParser", "DpoArgumentParser"], "trainer.utils": ["get_kbit_device_map", "get_peft_config", "get_quantization_config", "RichProgressCallback"], "multitask_prompt_tuning": [ "MultitaskPromptEmbedding", "MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", ], } try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["models"].extend( [ "DDPOPipelineOutput", "DDPOSchedulerOutput", "DDPOStableDiffusionPipeline", "DefaultDDPOStableDiffusionPipeline", ] ) _import_structure["trainer"].extend(["DDPOConfig", "DDPOTrainer"]) if TYPE_CHECKING: from .core import set_seed from .environment import TextEnvironment, TextHistory from .extras import BestOfNSampler from .import_utils import ( is_bitsandbytes_available, is_diffusers_available, is_npu_available, is_peft_available, is_wandb_available, is_xpu_available, ) from .models import ( AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PreTrainedModelWrapper, create_reference_model, setup_chat_format, SUPPORTED_ARCHITECTURES, ) from .trainer import ( DataCollatorForCompletionOnlyLM, DPOTrainer, IterativeSFTTrainer, KTOConfig, KTOTrainer, ModelConfig, PPOConfig, PPOTrainer, RewardConfig, RewardTrainer, SFTTrainer, ) from .trainer.utils import get_kbit_device_map, get_peft_config, get_quantization_config, RichProgressCallback from .commands.utils import init_zero_verbose, SftScriptArguments, DpoScriptArguments, TrlParser try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .models import ( DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline, ) from .trainer import DDPOConfig, DDPOTrainer else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, extra_objects={"__version__": __version__}, )
trl/trl/__init__.py/0
{ "file_path": "trl/trl/__init__.py", "repo_id": "trl", "token_count": 1736 }
433
from dataclasses import dataclass from typing import Literal, Optional, Tuple from transformers import PreTrainedModel, PreTrainedTokenizer from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead SUPPORTED_ARCHITECTURES = ( AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, ) # TODO: Add Abstract Base Class if more formats are added @dataclass class ChatMlSpecialTokens: """Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens.""" bos_token: str = "<|im_start|>" eos_token: str = "<|im_end|>" pad_token: str = "<|im_end|>" @property def system(self): return f"{self.bos_token}system" @property def user(self): return f"{self.bos_token}user" @property def assistant(self): return f"{self.bos_token}assistant" @property def chat_template(self): return ( "{% for message in messages %}" f"{{{{'{self.bos_token}' + message['role'] + '\n' + message['content'] + '{self.eos_token}' + '\n'}}}}" "{% endfor %}" "{% if add_generation_prompt %}" f"{{{{ '{self.assistant}\n' }}}}" "{% endif %}" ) FORMAT_MAPPING = {"chatml": ChatMlSpecialTokens} def setup_chat_format( model: PreTrainedModel, tokenizer: PreTrainedTokenizer, format: Optional[Literal["chatml"]] = "chatml", resize_to_multiple_of: Optional[int] = None, ) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: """ Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens. Args: model (`~transformers.PreTrainedModel`): The model to be modified. tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified. format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml". resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None. Returns: model (`~transformers.PreTrainedModel`): The modified model. tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer. """ # check if format available and retrieve if format not in FORMAT_MAPPING: raise ValueError(f"Format {format} not available. Please use one of {FORMAT_MAPPING.keys()}") chat_format = FORMAT_MAPPING[format]() # set special tokens and them tokenizer.eos_token = chat_format.eos_token tokenizer.pad_token = chat_format.pad_token tokenizer.bos_token = chat_format.bos_token tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]}) # set chat format for tokenizer tokenizer.chat_template = chat_format.chat_template # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377 model.resize_token_embeddings( len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None ) # Make sure to update the generation config to use the new eos & bos token if getattr(model, "generation_config", None) is not None: model.generation_config.bos_token_id = tokenizer.bos_token_id model.generation_config.eos_token_id = tokenizer.eos_token_id model.generation_config.pad_token_id = tokenizer.pad_token_id return model, tokenizer
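For reference, here is a minimal usage sketch of `setup_chat_format` (not part of the module above; the `facebook/opt-350m` checkpoint is only a placeholder, and any causal LM with a matching tokenizer could be substituted):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

from trl import setup_chat_format

# Placeholder checkpoint; substitute the model you actually want to fine-tune.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

# Adds the ChatML special tokens, sets `tokenizer.chat_template`,
# and resizes the model's token embeddings to cover the new vocabulary.
model, tokenizer = setup_chat_format(model, tokenizer)
```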
trl/trl/models/utils.py/0
{ "file_path": "trl/trl/models/utils.py", "repo_id": "trl", "token_count": 1328 }
434
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Execution process When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes are completed faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices. This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point. ## Execute on one process Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process. <hfoptions id="local-execution"> <hfoption id="statements"> You should use `accelerator.is_local_main_process` to indicate code that should only be executed once. ```py from tqdm.auto import tqdm progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) ``` You could also wrap a statement with `accelerator.is_local_main_process`. > [!TIP] > For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process. ```py if accelerator.is_local_main_process: print("Accelerate is the best") ``` </hfoption> <hfoption id="function"> For a function that should only be executed once, use [`~Accelerator.on_local_main_process`]. ```py @accelerator.on_local_main_process def do_my_thing(): "Something done once per server" do_thing_once_per_server() ``` </hfoption> </hfoptions> You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub. <hfoptions id="main-execution"> <hfoption id="statement"> You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes. ```py if accelerator.is_main_process: repo.push_to_hub() ``` </hfoption> <hfoption id="function"> For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`]. ```py @accelerator.on_main_process def do_my_thing(): "Something done once per server" do_thing_once() ``` </hfoption> </hfoptions> ## Execute on a specific process Accelerate can also help you execute functions that should only be executed on a specific process or a local process index. <hfoptions id="specific-execution"> <hfoption id="specific process"> Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on. 
```py @accelerator.on_process(process_index=0) def do_my_thing(): "Something done on process index 0" do_thing_on_index_zero() ``` </hfoption> <hfoption id="local process"> Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on. ```py @accelerator.on_local_process(local_process_idx=0) def do_my_thing(): "Something done on process index 0 on each server" do_thing_on_index_zero_on_each_server() ``` </hfoption> </hfoptions> ## Defer execution When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn’t save a model before making sure every process is done with training. To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU). ```py accelerator.wait_for_everyone() ```
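As a concrete sketch of that pattern (illustrative only, assuming `model` was prepared with the [`Accelerator`] and that saving a raw state dict to `"checkpoint.pt"` is acceptable for your setup), you could defer the save like this:

```py
# Block until every process has finished its training steps.
accelerator.wait_for_everyone()

# Unwrap the model from any distributed containers added by `prepare`.
unwrapped_model = accelerator.unwrap_model(model)

# Save once, from the main process only (the path here is just an example).
if accelerator.is_main_process:
    accelerator.save(unwrapped_model.state_dict(), "checkpoint.pt")
```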
accelerate/docs/source/basic_tutorials/execution.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/execution.md", "repo_id": "accelerate", "token_count": 1307 }
0
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quicktour There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible. This quicktour introduces the three main features of Accelerate: * a unified command line launching interface for distributed training scripts * a training library for adapting PyTorch training code to run on different distributed setups * Big Model Inference ## Unified launch interface Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM. But in most cases, you should always run [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup. ```bash accelerate config ``` The [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerates cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine. After you've configured your environment, you can test your setup with [`accelerate test`](../../docs/source/package_reference/cli#accelerate-test), which launches a short script to test the distributed environment. ```bash accelerate test ``` > [!TIP] > Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache. Once your environment is setup, launch your training script with [`accelerate launch`](../../docs/source/package_reference/cli#accelerate-launch)! ```bash accelerate launch path_to_script.py --args_for_the_script ``` To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts. 
## Adapt training code The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups. You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + device = accelerator.device + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` 1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched. ```python from accelerate import Accelerator accelerator = Accelerator() ``` 2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you. > [!WARNING] > This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU. ```py device = accelerator.device ``` 3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerates version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs. ```python model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, lr_scheduler ) ``` 4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup. ```py accelerator.backward(loss) ``` Read [Accelerate’s internal mechanisms](../../docs/source/concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code. ### Distributed evaluation To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method: ```python validation_dataloader = accelerator.prepare(validation_dataloader) ``` Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the largest size across processes. 
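As a quick sketch of that last point (not taken from the original guide; the `dim=1` and `pad_index=0` values are assumptions that depend on the shape and padding convention of your tensors), padding could be applied just before gathering. The full evaluation loop is shown below.

```python
# Pad each process's predictions to a common length along the sequence dimension,
# then gather them so metrics are computed over the full evaluation set.
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=0)
predictions = accelerator.gather_for_metrics(predictions)
```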
```python for inputs, targets in validation_dataloader: predictions = model(inputs) # Gather all predictions and targets all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets)) # Example of use with a *Datasets.Metric* metric.add_batch(all_predictions, all_targets) ``` > [!TIP] > Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric. ## Big Model Inference Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory. > [!TIP] > Take a look at the [Handling big models for inference](../../docs/source/concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood. ### Empty weights initialization The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time. For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU. ```py from accelerate import init_empty_weights from transformers import AutoConfig, AutoModelForCausalLM config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1") with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) ``` ### Load and dispatch weights The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distribute weights across all available devices. The `device_map` parameter determines where to place each model layer, and specifiying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection). ```py from accelerate import load_checkpoint_and_dispatch model = load_checkpoint_and_dispatch( model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block'] ) ``` ## Next steps Now that you've been introduced to the main Accelerate features, your next steps could include: * Check out the [tutorials](docs/source/basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library. * Dive into the [guides](docs/source/usage_guides/explore) to see how to use Accelerate for specific use-cases. * Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](docs/source/concept_guides/internal_mechanism). * Look up classes and commands in the [API reference](docs/source/package_reference/accelerator) to see what parameters and options are available.
accelerate/docs/source/quicktour.md/0
{ "file_path": "accelerate/docs/source/quicktour.md", "repo_id": "accelerate", "token_count": 2783 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tracking There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex. 🤗 Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`] ## Integrated Trackers Currently `Accelerate` supports seven trackers out-of-the-box: - TensorBoard - WandB - CometML - Aim - MLFlow - ClearML - DVCLive To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]: ```python from accelerate import Accelerator from accelerate.utils import LoggerType accelerator = Accelerator(log_with="all") # For all available trackers in the environment accelerator = Accelerator(log_with="wandb") accelerator = Accelerator(log_with=["wandb", LoggerType.TENSORBOARD]) ``` At the start of your experiment [`Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged: ```python hps = {"num_iterations": 5, "learning_rate": 1e-2} accelerator.init_trackers("my_project", config=hps) ``` When you are ready to log any data, [`Accelerator.log`] should be used. A `step` can also be passed in to correlate the data with a particular step in the training loop. ```python accelerator.log({"train_loss": 1.12, "valid_loss": 0.8}, step=1) ``` Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any. ```python accelerator.end_training() ``` A full example is below: ```python from accelerate import Accelerator accelerator = Accelerator(log_with="all") config = { "num_iterations": 5, "learning_rate": 1e-2, "loss_function": str(my_loss_function), } accelerator.init_trackers("example_project", config=config) my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader) device = accelerator.device my_model.to(device) for iteration in config["num_iterations"]: for step, batch in my_training_dataloader: my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) accelerator.backward(loss) my_optimizer.step() accelerator.log({"training_loss": loss}, step=step) accelerator.end_training() ``` If a tracker requires a directory to save data to, such as `TensorBoard`, then pass the directory path to `project_dir`. The `project_dir` parameter is useful when there are other configurations to be combined with in the [`~utils.ProjectConfiguration`] data class. 
For example, you can save the TensorBoard data to `project_dir` and everything else can be logged in the `logging_dir` parameter of [`~utils.ProjectConfiguration`: ```python accelerator = Accelerator(log_with="tensorboard", project_dir=".") # use with ProjectConfiguration config = ProjectConfiguration(project_dir=".", logging_dir="another/directory") accelerator = Accelerator(log_with="tensorboard", project_config=config) ``` ## Implementing Custom Trackers To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class. Every tracker must implement three functions and have three properties: - `__init__`: - Should store a `run_name` and initialize the tracker API of the integrated library. - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added. - `store_init_configuration`: - Should take in a `values` dictionary and store them as a one-time experiment configuration - `log`: - Should take in a `values` dictionary and a `step`, and should log them to the run - `name` (`str`): - A unique string name for the tracker, such as `"wandb"` for the wandb tracker. - This will be used for interacting with this tracker specifically - `requires_logging_directory` (`bool`): - Whether a `logging_dir` is needed for this particular tracker and if it uses one. - `tracker`: - This should be implemented as a `@property` function - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`. Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process for instance. A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on the main process: ```python from accelerate.tracking import GeneralTracker, on_main_process from typing import Optional import wandb class MyCustomTracker(GeneralTracker): name = "wandb" requires_logging_directory = False @on_main_process def __init__(self, run_name: str): self.run_name = run_name run = wandb.init(self.run_name) @property def tracker(self): return self.run.run @on_main_process def store_init_configuration(self, values: dict): wandb.config(values) @on_main_process def log(self, values: dict, step: Optional[int] = None): wandb.log(values, step=step) ``` When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically be used with the API: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=tracker) ``` These also can be mixed with existing trackers, including with `"all"`: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=[tracker, "all"]) ``` ## Accessing the internal tracker If some custom interactions with a tracker might be wanted directly, you can quickly access one using the [`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker's `.name` attribute and it will return that tracker on the main process. 
This example shows doing so with wandb: ```python wandb_tracker = accelerator.get_tracker("wandb") ``` From there you can interact with `wandb`'s `run` object like normal: ```python wandb_run.log_artifact(some_artifact_to_log) ``` <Tip> Trackers built in Accelerate will automatically execute on the correct process, so if a tracker is only meant to be ran on the main process it will do so automatically. </Tip> If you want to truly remove Accelerate's wrapping entirely, you can achieve the same outcome with: ```python wandb_tracker = accelerator.get_tracker("wandb", unwrap=True) with accelerator.on_main_process: wandb_tracker.log_artifact(some_artifact_to_log) ``` ## When a wrapper cannot work If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement: ```diff from accelerate import Accelerator + import neptune.new as neptune accelerator = Accelerator() + run = neptune.init(...) my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader) device = accelerator.device my_model.to(device) for iteration in config["num_iterations"]: for batch in my_training_dataloader: my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) total_loss += loss accelerator.backward(loss) my_optimizer.step() + if accelerator.is_main_process: + run["logs/training/batch/loss"].log(loss) ```
accelerate/docs/source/usage_guides/tracking.md/0
{ "file_path": "accelerate/docs/source/usage_guides/tracking.md", "repo_id": "accelerate", "token_count": 2703 }
2
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: run = os.path.split(__file__)[-1].split(".")[0] accelerator.init_trackers(run, config) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the starting epoch so files are named properly starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() accurate = 0 num_elems = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, }, step=overall_step, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/complete_cv_example.py/0
{ "file_path": "accelerate/examples/complete_cv_example.py", "repo_id": "accelerate", "token_count": 5356 }
3
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Main driver for the selection menu, based on https://github.com/bchao1/bullet """ import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP in_colab = False try: in_colab = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class BulletMenu: """ A CLI menu to select a choice from a list of choices using the keyboard. """ def __init__(self, prompt: str = None, choices: list = []): self.position = 0 self.choices = choices self.prompt = prompt if sys.platform == "win32": self.arrow_char = "*" else: self.arrow_char = "➔ " def write_choice(self, index, end: str = ""): if sys.platform != "win32": writeColor(self.choices[index], 32, end) else: forceWrite(self.choices[index], end) def print_choice(self, index: int): "Prints the choice at the given index" if index == self.position: forceWrite(f" {self.arrow_char} ") self.write_choice(index) else: forceWrite(f" {self.choices[index]}") reset_cursor() def move_direction(self, direction: Direction, num_spaces: int = 1): "Should not be directly called, used to move a direction of either up or down" old_position = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(old_position) move_cursor(num_spaces, direction.name) self.print_choice(self.position) @input.mark(KEYMAP["up"]) def move_up(self): self.move_direction(Direction.UP) @input.mark(KEYMAP["down"]) def move_down(self): self.move_direction(Direction.DOWN) @input.mark(KEYMAP["newline"]) def select(self): move_cursor(len(self.choices) - self.position, "DOWN") return self.position @input.mark(KEYMAP["interrupt"]) def interrupt(self): move_cursor(len(self.choices) - self.position, "DOWN") raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)]) def select_row(self): index = int(chr(self.current_selection)) movement = index - self.position if index == self.position: return if index < len(self.choices): if self.position > index: self.move_direction(Direction.UP, -movement) elif self.position < index: self.move_direction(Direction.DOWN, movement) else: return else: return def run(self, default_choice: int = 0): "Start the menu and return the selected choice" if self.prompt: linebreak() forceWrite(self.prompt, "\n") if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter", "\n") else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") self.position = default_choice for i in range(len(self.choices)): self.print_choice(i) forceWrite("\n") move_cursor(len(self.choices) - self.position, "UP") with 
cursor.hide(): while True: if in_colab: try: choice = int(builtins.input()) except ValueError: choice = default_choice else: choice = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices) + 1): move_cursor(1, "UP") clear_line() self.write_choice(choice, "\n") return choice
accelerate/src/accelerate/commands/menu/selection_menu.py/0
{ "file_path": "accelerate/src/accelerate/commands/menu/selection_menu.py", "repo_id": "accelerate", "token_count": 2187 }
4
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_DISTRIBUTED_OPERATION_TYPES, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( AutocastKwargs, BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DataLoaderConfiguration, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FP8RecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import ( are_libraries_initialized, check_cuda_p2p_ib_support, check_fp8_capability, convert_dict_to_env_variables, get_int_from_env, parse_choice_from_env, parse_flag_from_env, str_to_bool, ) from .imports import ( get_ccl_version, is_4bit_bnb_available, is_8bit_bnb_available, is_aim_available, is_bf16_available, is_bnb_available, is_boto3_available, is_ccl_available, is_clearml_available, is_comet_ml_available, is_cuda_available, is_datasets_available, is_deepspeed_available, is_dvclive_available, is_fp8_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mlu_available, is_mps_available, is_msamp_available, is_npu_available, is_pandas_available, is_peft_available, is_pippy_available, is_pynvml_available, is_rich_available, is_sagemaker_available, is_tensorboard_available, is_timm_available, is_torch_xla_available, is_transformer_engine_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( calculate_maximum_sizes, check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, is_peft_model, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( CannotPadNestedTensorWarning, broadcast, broadcast_object_list, concatenate, convert_outputs_to_fp32, convert_to_fp32, copy_tensor_to_devices, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, ignorant_find_batch_size, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, pad_input_tensors, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if 
is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_4bit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, T5TrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( check_os_kernel, clean_state_dict_for_safetensors, clear_environment, convert_bytes, extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, recursive_getattr, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
accelerate/src/accelerate/utils/__init__.py/0
{ "file_path": "accelerate/src/accelerate/utils/__init__.py", "repo_id": "accelerate", "token_count": 2447 }
5
# Welcome to the RLHF Handbook! Stay tuned for more details 🤗
alignment-handbook/chapters/en/chapter0/introduction.mdx/0
{ "file_path": "alignment-handbook/chapters/en/chapter0/introduction.mdx", "repo_id": "alignment-handbook", "token_count": 18 }
6
# Instructions to StarChat2

Similar to how we trained Zephyr 7B Beta in our [technical report](https://huggingface.co/papers/2310.16944), training this model proceeds in two steps:

1. Apply SFT to fine-tune [StarCoder2 15B](https://huggingface.co/bigcode/starcoder2-15b) on a blend of chat, code, and math datasets. The result is an SFT model like [`starchat2-15b-sft-v0.1`](https://huggingface.co/HuggingFaceH4/starchat2-15b-sft-v0.1).
2. Align the SFT model to AI feedback via DPO on the UltraFeedback and Orca DPO Pairs datasets. The result is a DPO model like [`starchat2-15b-v0.1`](https://huggingface.co/HuggingFaceH4/starchat2-15b-v0.1).

See below for commands to train these models using DeepSpeed ZeRO-3.

## Full training examples

You will require 8 GPUs (80GB of VRAM) to train the full model - alternatively, you can train on 1 GPU by adjusting `per_device_train_batch_size` and `gradient_accumulation_steps` to keep the global batch size constant. A recipe involving QLoRA will come later 🤗.

```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/starchat2-15b/sft/config_v0.1.yaml

# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/starchat2-15b/dpo/config_v0.1.yaml
```
alignment-handbook/recipes/starchat2-15b/README.md/0
{ "file_path": "alignment-handbook/recipes/starchat2-15b/README.md", "repo_id": "alignment-handbook", "token_count": 494 }
7
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Adapted from huggingface/transformers: https://github.com/huggingface/transformers/blob/21a2d900eceeded7be9edc445b56877b95eda4ca/setup.py import re import shutil from pathlib import Path from setuptools import find_packages, setup # Remove stale alignment.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 stale_egg_info = Path(__file__).parent / "alignment.egg-info" if stale_egg_info.exists(): print( ( "Warning: {} exists.\n\n" "If you recently updated alignment, this is expected,\n" "but it may prevent alignment from installing in editable mode.\n\n" "This directory is automatically generated by Python's packaging tools.\n" "I will remove it now.\n\n" "See https://github.com/pypa/pip/issues/5466 for details.\n" ).format(stale_egg_info) ) shutil.rmtree(stale_egg_info) # IMPORTANT: all dependencies should be listed here with their version requirements, if any. # * If a dependency is fast-moving (e.g. transformers), pin to the exact version _deps = [ "accelerate==0.27.2", "bitsandbytes==0.41.2.post2", "black==23.1.0", "datasets==2.14.6", "deepspeed==0.12.2", "einops>=0.6.1", "evaluate==0.4.0", "flake8>=6.0.0", "hf-doc-builder>=0.4.0", "hf_transfer>=0.1.4", "huggingface-hub>=0.19.2,<1.0", "isort>=5.12.0", "ninja>=1.11.1", "numpy>=1.24.2", "packaging>=23.0", "parameterized>=0.9.0", "peft==0.7.1", "protobuf<=3.20.2", # Needed to avoid conflicts with `transformers` "pytest", "safetensors>=0.3.3", "sentencepiece>=0.1.99", "scipy", "tensorboard", "torch==2.1.2", "transformers @ git+https://github.com/huggingface/transformers.git@831bc25d8fdb85768402f772cf65cc3d7872b211", # Enable StarCoder2 "trl==0.7.10", "jinja2>=3.0.0", "tqdm>=4.64.1", ] # this is a lookup table with items like: # # tokenizers: "tokenizers==0.9.4" # packaging: "packaging" # # some of the values are versioned whereas others aren't. 
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ \[\]]+)(?:\[[^\]]+\])?(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] extras = {} extras["tests"] = deps_list("pytest", "parameterized") extras["torch"] = deps_list("torch") extras["quality"] = deps_list("black", "isort", "flake8") extras["docs"] = deps_list("hf-doc-builder") extras["dev"] = extras["docs"] + extras["quality"] + extras["tests"] # core dependencies shared across the whole project - keep this to a bare minimum :) install_requires = [ deps["accelerate"], deps["bitsandbytes"], deps["einops"], deps["evaluate"], deps["datasets"], deps["deepspeed"], deps["hf_transfer"], deps["huggingface-hub"], deps["jinja2"], deps["ninja"], deps["numpy"], deps["packaging"], # utilities from PyPA to e.g., compare versions deps["peft"], deps["protobuf"], deps["safetensors"], deps["sentencepiece"], deps["scipy"], deps["tensorboard"], deps["tqdm"], # progress bars in model download and training scripts deps["transformers"], deps["trl"], ] setup( name="alignment-handbook", version="0.4.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future)", author_email="[email protected]", description="The Alignment Handbook", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="nlp deep learning rlhf llm", license="Apache", url="https://github.com/huggingface/alignment-handbook", package_dir={"": "src"}, packages=find_packages("src"), zip_safe=False, extras_require=extras, python_requires=">=3.10.9", install_requires=install_requires, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], )
alignment-handbook/setup.py/0
{ "file_path": "alignment-handbook/setup.py", "repo_id": "alignment-handbook", "token_count": 2038 }
8
# Creating apps
candle/candle-book/src/apps/README.md/0
{ "file_path": "candle/candle-book/src/apps/README.md", "repo_id": "candle", "token_count": 4 }
9
# Running a model

In order to run an existing model, you will need to download and use existing weights. Most models are already available on https://huggingface.co/ in the [`safetensors`](https://github.com/huggingface/safetensors) format.

Let's get started by running an old model: `bert-base-uncased`.
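As a rough, hedged sketch of the idea (not the book's own example): assuming `hf-hub`, `candle-core`, and `anyhow` as dependencies, and assuming the repository stores its weights under the file name `model.safetensors`, fetching and inspecting the weights might look like this:

```rust
use candle_core::Device;

fn main() -> anyhow::Result<()> {
    // Download (or reuse a cached copy of) the weights from the Hugging Face Hub.
    let api = hf_hub::api::sync::Api::new()?;
    let repo = api.model("bert-base-uncased".to_string());
    let weights_path = repo.get("model.safetensors")?;

    // Load every tensor in the safetensors file onto the CPU and print its shape.
    let tensors = candle_core::safetensors::load(weights_path, &Device::Cpu)?;
    for (name, tensor) in tensors.iter() {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}
```

Swapping `Device::Cpu` for a CUDA or Metal device would place the tensors on an accelerator instead; the file name and model id above are illustrative.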
candle/candle-book/src/inference/inference.md/0
{ "file_path": "candle/candle-book/src/inference/inference.md", "repo_id": "candle", "token_count": 88 }
10
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor, b: &Tensor, c: &Tensor) { a.where_cond(b, c).unwrap(); } const fn create_cond_arr<const N: usize>() -> [u8; N] { let mut arr = [0u8; N]; let mut i = 0; while i < N { arr[i] = (i % 2) as u8; i += 1; } arr } const B: usize = 1; const M: usize = 1024; const K: usize = 1024; const SIZE: usize = B * M * K; const DATA: [u8; SIZE] = create_cond_arr::<SIZE>(); fn run_where_cond_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let tensor = Tensor::from_slice(DATA.as_slice(), (B, M, K), &device).unwrap(); let on_true = Tensor::ones((B, M, K), dtype, &device).unwrap(); let on_false = Tensor::zeros((B, M, K), dtype, &device).unwrap(); let elements = B * M * K; // E.g. 2 f32 tensors + 1 u8 tensor let flops = (2 * elements * dtype.size_in_bytes()) + elements; let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run( black_box(&tensor), black_box(&on_true), black_box(&on_false), ); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let device = BenchDeviceHandler::new().unwrap(); for d in device.devices { run_where_cond_benchmark(c, &d, DType::F32, "where_cond_f32"); run_where_cond_benchmark(c, &d, DType::BF16, "where_cond_bf16"); run_where_cond_benchmark(c, &d, DType::F16, "where_cond_f16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/where_cond.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/where_cond.rs", "repo_id": "candle", "token_count": 942 }
11
use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{DType, Error, IntDType, Layout, Result, Shape, WithDType}; use half::{bf16, f16}; use rayon::prelude::*; const USE_IM2COL_CONV1D: bool = true; const USE_IM2COL_CONV1D_TR: bool = true; const USE_IM2COL_CONV2D: bool = true; // TODO: Maybe we should not implement [Clone] here and instead have an explicit allocator + // intercept the oom errors to avoid panicking and provide a proper error. #[derive(Debug, Clone)] pub enum CpuStorage { U8(Vec<u8>), U32(Vec<u32>), I64(Vec<i64>), BF16(Vec<bf16>), F16(Vec<f16>), F32(Vec<f32>), F64(Vec<f64>), } #[derive(Debug, Clone)] pub struct CpuDevice; pub trait Map1 { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>>; fn map(&self, vs: &CpuStorage, layout: &Layout) -> Result<CpuStorage> { match vs { CpuStorage::U8(vs) => Ok(CpuStorage::U8(self.f(vs, layout)?)), CpuStorage::U32(vs) => Ok(CpuStorage::U32(self.f(vs, layout)?)), CpuStorage::I64(vs) => Ok(CpuStorage::I64(self.f(vs, layout)?)), CpuStorage::BF16(vs) => Ok(CpuStorage::BF16(self.f(vs, layout)?)), CpuStorage::F16(vs) => Ok(CpuStorage::F16(self.f(vs, layout)?)), CpuStorage::F32(vs) => Ok(CpuStorage::F32(self.f(vs, layout)?)), CpuStorage::F64(vs) => Ok(CpuStorage::F64(self.f(vs, layout)?)), } } } pub trait Map1Any { fn f<T: WithDType, W: Fn(Vec<T>) -> CpuStorage>( &self, vs: &[T], layout: &Layout, wrap: W, ) -> Result<CpuStorage>; fn map(&self, vs: &CpuStorage, layout: &Layout) -> Result<CpuStorage> { match vs { CpuStorage::U8(vs) => Ok(self.f(vs, layout, CpuStorage::U8)?), CpuStorage::U32(vs) => Ok(self.f(vs, layout, CpuStorage::U32)?), CpuStorage::I64(vs) => Ok(self.f(vs, layout, CpuStorage::I64)?), CpuStorage::BF16(vs) => Ok(self.f(vs, layout, CpuStorage::BF16)?), CpuStorage::F16(vs) => Ok(self.f(vs, layout, CpuStorage::F16)?), CpuStorage::F32(vs) => Ok(self.f(vs, layout, CpuStorage::F32)?), CpuStorage::F64(vs) => Ok(self.f(vs, layout, CpuStorage::F64)?), } } } type C = CpuStorage; pub trait Map2 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<T>>; fn map( &self, v1: &CpuStorage, l1: &Layout, v2: &CpuStorage, l2: &Layout, ) -> Result<CpuStorage> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U32(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::I64(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::BF16(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::F16(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::F32(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::F64(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub trait Map2U8 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<u8>>; fn map( &self, v1: &CpuStorage, l1: &Layout, v2: &CpuStorage, l2: &Layout, ) -> Result<CpuStorage> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), 
_ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } struct Cmp(CmpOp); impl Map2U8 for Cmp { const OP: &'static str = "cmp"; #[inline(always)] fn f<T: WithDType>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<u8>> { let dst = match self.0 { CmpOp::Eq => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x == y)), CmpOp::Ne => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x != y)), CmpOp::Lt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x < y)), CmpOp::Le => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x <= y)), CmpOp::Gt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x > y)), CmpOp::Ge => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x >= y)), }; Ok(dst) } } struct WCond<'a, T: IntDType>(&'a [T], &'a Layout); impl<'a, I: IntDType> Map2 for WCond<'a, I> { const OP: &'static str = "where"; #[inline(always)] fn f<T: WithDType>(&self, t: &[T], t_l: &Layout, f: &[T], f_l: &Layout) -> Result<Vec<T>> { let vs = match ( self.1.contiguous_offsets(), t_l.contiguous_offsets(), f_l.contiguous_offsets(), ) { (Some((o1, o2)), Some((o_t1, o_t2)), Some((o_f1, o_f2))) => { let pred = &self.0[o1..o2]; let t = &t[o_t1..o_t2]; let f = &f[o_f1..o_f2]; pred.iter() .zip(t.iter().zip(f.iter())) .map(|(p, (&t, &f))| if p.is_true() { t } else { f }) .collect::<Vec<_>>() } _ => self .1 .strided_index() .zip(t_l.strided_index().zip(f_l.strided_index())) .map(|(i_p, (i_t, i_f))| { if self.0[i_p].is_true() { t[i_t] } else { f[i_f] } }) .collect::<Vec<_>>(), }; Ok(vs) } } struct ReduceIndex { reduce_dim_index: usize, use_min: bool, return_index: bool, } impl ReduceIndex { // The value gets replaced if f(s[current_acc], s[i]) returns true. #[inline(always)] fn fold_impl<T, U, F, G>(&self, src: &[T], src_l: &Layout, f: F, g: G) -> Result<Vec<U>> where T: Clone + Copy, U: Clone + Copy, F: Fn(T, T) -> bool, G: Fn(T, usize) -> U, { let reduce_dim_size = src_l.dims()[self.reduce_dim_index]; let reduce_dim_stride = src_l.stride()[self.reduce_dim_index]; let dst_len = src_l.shape().elem_count() / reduce_dim_size; let mut dst: Vec<U> = Vec::with_capacity(dst_len); let dst_to_set = dst.spare_capacity_mut(); let dst_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(dst_to_set) }; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; if reduce_dim_stride == 1 { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let start_src_i = start_src_i * reduce_dim_size; let src = &src[start_src_i..start_src_i + reduce_dim_size]; let mut acc = 0; let mut val = src[0]; for (src_i, &s) in src.iter().enumerate() { if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } else { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let (p, q) = ( start_src_i / reduce_dim_stride, start_src_i % reduce_dim_stride, ); // start_src_i = p * reduce_dim_stride + q let start_src_i = p * reduce_dim_stride * reduce_dim_size + q; let src = &src[start_src_i..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } } None => { let l = src_l.narrow(self.reduce_dim_index, 0, 1)?; for (unstr_index, src_index) in l.strided_index().enumerate() { let src = &src[src_index..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } dst_to_set[unstr_index] = g(val, acc) } } } unsafe { 
dst.set_len(dst_len) }; Ok(dst) } } impl Map1Any for ReduceIndex { #[inline(always)] fn f<T: WithDType, W: Fn(Vec<T>) -> CpuStorage>( &self, src: &[T], src_l: &Layout, wrap: W, ) -> Result<CpuStorage> { if src_l.shape().elem_count() == 0 { Err(Error::EmptyTensor { op: "reduce" }.bt())? } let dst = match (self.return_index, self.use_min) { (false, true) => wrap(self.fold_impl(src, src_l, |x, y| x > y, |v, _i| v)?), (false, false) => wrap(self.fold_impl(src, src_l, |x, y| x < y, |v, _i| v)?), (true, true) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x > y, |_v, i| i as u32)?) } (true, false) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x < y, |_v, i| i as u32)?) } }; Ok(dst) } } struct ReduceSum<'a> { dst_shape: &'a Shape, reduce_dims: &'a [usize], reduce_dims_and_stride: Vec<(usize, usize)>, } impl<'a> ReduceSum<'a> { #[inline(always)] fn fold_impl<T>(&self, src: &[T], src_l: &Layout, start_elt: T) -> Result<Vec<T>> where T: WithDType, { let mut dst = vec![start_elt; self.dst_shape.elem_count()]; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; // Handle the case where we reduce over the last dimensions separately as it is // fairly common and easy to optimize. This rely on the layout being contiguous! // reduce_dims is sorted, check if it is ranging from a to n-1. let reduce_over_last_dims = self .reduce_dims .iter() .rev() .enumerate() .all(|(i, &v)| v == src_l.shape().rank() - 1 - i); if reduce_over_last_dims { let reduce_sz = self .reduce_dims_and_stride .iter() .map(|(u, _)| u) .product::<usize>(); for (dst_i, dst_v) in dst.iter_mut().enumerate() { let src_i = dst_i * reduce_sz; unsafe { T::vec_reduce_sum( src[src_i..src_i + reduce_sz].as_ptr(), dst_v, reduce_sz, ) }; } return Ok(dst); }; for (unstr_index, &src) in src.iter().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src; } } None => { for (unstr_index, src_index) in src_l.strided_index().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src[src_index]; } } } Ok(dst) } } impl<'a> Map1 for ReduceSum<'a> { #[inline(always)] fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { self.fold_impl(src, src_l, T::zero()) } } pub fn unary_map<T: Copy, U: Copy, F: FnMut(T) -> U>( vs: &[T], layout: &Layout, mut f: F, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => vs [start_offset..start_offset + len] .iter() .map(|&v| f(v)) .collect(), crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut result = Vec::with_capacity(layout.shape().elem_count()); // Specialize the case where block_len is one to avoid the second loop. 
if block_len == 1 { for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } } else { for index in block_start_index { for offset in 0..block_len { let v = unsafe { vs.get_unchecked(index + offset) }; result.push(f(*v)) } } } result } } } pub fn unary_map_vec<T: Copy, U: Copy, F: FnMut(T) -> U, FV: FnMut(&[T], &mut [U])>( vs: &[T], layout: &Layout, mut f: F, mut f_vec: FV, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let mut ys: Vec<U> = Vec::with_capacity(len); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(ys_to_set) }; f_vec(&vs[start_offset..start_offset + len], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(len) }; ys } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let el_count = layout.shape().elem_count(); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { let mut result = Vec::with_capacity(el_count); for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } result } else { let mut ys: Vec<U> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(ys_to_set) }; let mut dst_index = 0; for src_index in block_start_index { let vs = &vs[src_index..src_index + block_len]; let ys = &mut ys_to_set[dst_index..dst_index + block_len]; f_vec(vs, ys); dst_index += block_len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } } } } // This function maps over two strided index sequences. pub fn binary_map<T: Copy, U: Copy, F: FnMut(T, T) -> U>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, ) -> Vec<U> { match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => lhs[o_l1..o_l2] .iter() .zip(rhs[o_r1..o_r2].iter()) .map(|(&l, &r)| f(l, r)) .collect(), (Some((o_l1, o_l2)), None) => { // TODO: Maybe we want to avoid going through the layout twice. match rhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; lhs[o_l1..o_l2] .iter() .map(|&l| { let r = unsafe { rhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(l, *r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } (None, Some((o_r1, o_r2))) => { // TODO: Maybe we want to avoid going through the layout twice. match lhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; rhs[o_r1..o_r2] .iter() .map(|&r| { let l = unsafe { lhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(*l, r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } // Similar to binary_map but with vectorized variants. 
pub fn binary_map_vec<T: Copy, F: FnMut(T, T) -> T, FV: FnMut(&[T], &[T], &mut [T])>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, mut f_vec: FV, ) -> Vec<T> { let el_count = lhs_l.shape().elem_count(); match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => { let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; f_vec(&lhs[o_l1..o_l2], &rhs[o_r1..o_r2], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } (Some((o_l1, o_l2)), None) => match rhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_l1..o_l2).step_by(ob.len) { f_vec( &lhs[src_i..src_i + ob.len], rhs, &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys = lhs[o_l1..o_l2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &r) in rhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(*v, r) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, (None, Some((o_r1, o_r2))) => match lhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_r1..o_r2).step_by(ob.len) { f_vec( lhs, &rhs[src_i..src_i + ob.len], &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. 
unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys = rhs[o_r1..o_r2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &l) in lhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(l, *v) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } struct Affine(f64, f64); impl Map1 for Affine { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let mul = T::from_f64(self.0); let add = T::from_f64(self.1); Ok(unary_map(vs, layout, |v| v * mul + add)) } } struct AvgPool2D((usize, usize), (usize, usize)); impl Map1 for AvgPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; let scale = 1f64 / (k_h * k_w) as f64; let scale = T::from_f64(scale); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut sum = T::zero(); for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; sum += src[src_index + m * stride_h + n * stride_w] } } dst[h_idx * w_out + w_idx] = sum * scale; } } } } Ok(dst) } } struct MaxPool2D((usize, usize), (usize, usize)); impl Map1 for MaxPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut largest = src[src_index + s_h * h_idx * stride_h + s_w * w_idx * stride_w]; for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; if largest < src[src_index + m * stride_h + n * stride_w] { largest = src[src_index + m * stride_h + n * stride_w] } } } dst[h_idx * w_out + w_idx] = largest; } } } } Ok(dst) } } struct UpsampleNearest1D(usize); impl Map1 for UpsampleNearest1D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*sz? 
let dst_sz = self.0; let (b_sz, c, src_sz) = layout.shape().dims3()?; let stride = layout.stride(); let stride_sz = stride[2]; let src_index = layout.start_offset(); let scale_sz = src_sz as f64 / dst_sz as f64; let mut dst = vec![T::zero(); b_sz * c * dst_sz]; let src_idxs = (0..dst_sz) .map(|idx| usize::min(src_sz - 1, (idx as f64 * scale_sz) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_sz..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_sz..]; let src_index = src_index + c_idx * stride[1]; for (idx, src_idx) in src_idxs.iter().enumerate() { dst[idx] = src[src_index + src_idx * stride_sz] } } } Ok(dst) } } struct UpsampleNearest2D(usize, usize); impl Map1 for UpsampleNearest2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*h, 2*w? let (dst_h, dst_w) = (self.0, self.1); let (b_sz, c, src_h, src_w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let src_index = layout.start_offset(); let scale_h = src_h as f64 / dst_h as f64; let scale_w = src_w as f64 / dst_w as f64; let mut dst = vec![T::zero(); b_sz * c * dst_h * dst_w]; let src_h_idxs = (0..dst_h) .map(|h_idx| usize::min(src_h - 1, (h_idx as f64 * scale_h) as usize)) .collect::<Vec<_>>(); let src_w_idxs = (0..dst_w) .map(|w_idx| usize::min(src_w - 1, (w_idx as f64 * scale_w) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_h * dst_w..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_h * dst_w..]; let src_index = src_index + c_idx * stride[1]; for (h_idx, src_h_idx) in src_h_idxs.iter().enumerate() { for (w_idx, src_w_idx) in src_w_idxs.iter().enumerate() { let src_index = src_index + src_h_idx * stride_h + src_w_idx * stride_w; dst[h_idx * dst_w + w_idx] = src[src_index] } } } } Ok(dst) } } struct Gather<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map1 for Gather<'a, I> { fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let src = match src_l.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let src_dims = src_l.dims(); let dst_len: usize = ids_dims.iter().product(); let dst_left_len: usize = ids_dims[..dim].iter().product(); let dst_dim_len = ids_dims[dim]; let dst_right_len: usize = ids_dims[dim + 1..].iter().product(); let src_dim_len = src_dims[dim]; let src_right_len: usize = src_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 0..dst_left_len { let start_src_idx = left_i * src_right_len * src_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..dst_dim_len { let start_dst_idx = start_dst_idx + i * dst_right_len; for right_i in 0..dst_right_len { let dst_idx = start_dst_idx + right_i; let index = ids[dst_idx].as_usize(); if index >= src_dim_len { Err(Error::InvalidIndex { index, size: src_dim_len, op: "gather", } .bt())? 
} let src_idx = start_src_idx + index * src_right_len + right_i; dst[dst_idx] = src[src_idx] } } } Ok(dst) } } struct IndexSelect<'a, T: IntDType> { ids: &'a [T], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map1 for IndexSelect<'a, I> { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { let src = match layout.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "index-select" }.bt())?, }; let dim = self.dim; let n_ids = match self.ids_l.dims() { [n_ids] => *n_ids, d => Err(Error::UnexpectedNumberOfDims { expected: 1, got: d.len(), shape: self.ids_l.shape().clone(), } .bt())?, }; let stride_ids = self.ids_l.stride()[0]; let mut dst_dims = layout.dims().to_vec(); let src_dim = dst_dims[dim]; dst_dims[dim] = n_ids; let dst_len: usize = dst_dims.iter().product(); let left_len: usize = dst_dims[..dim].iter().product(); let right_len: usize = dst_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 0..left_len { let start_src_idx = left_i * right_len * src_dim; let start_dst_idx = left_i * right_len * n_ids; for i in 0..n_ids { let index = self.ids[self.ids_l.start_offset() + stride_ids * i].as_usize(); if index >= src_dim { Err(Error::InvalidIndex { index, size: src_dim, op: "index-select", } .bt())? } let start_src_idx = start_src_idx + index * right_len; let start_dst_idx = start_dst_idx + i * right_len; dst[start_dst_idx..start_dst_idx + right_len] .copy_from_slice(&src[start_src_idx..start_src_idx + right_len]) } } Ok(dst) } } struct ScatterAdd<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map2 for ScatterAdd<'a, I> { const OP: &'static str = "scatter-add"; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "scatter-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let dst_dims = l1.dims(); let dst_dim_len = dst_dims[dim]; let dst_right_len: usize = dst_dims[dim + 1..].iter().product(); let ids_left_len: usize = ids_dims[..dim].iter().product(); let ids_dim_len = ids_dims[dim]; let ids_right_len: usize = ids_dims[dim + 1..].iter().product(); let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; for left_i in 0..ids_left_len { let start_ids_idx = left_i * ids_right_len * ids_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..ids_dim_len { let start_ids_idx = start_ids_idx + i * ids_right_len; for right_i in 0..dst_right_len { let ids_idx = start_ids_idx + right_i; let index = ids[ids_idx].as_usize(); if index >= dst_dim_len { Err(Error::InvalidIndex { index, size: dst_dim_len, op: "gather", } .bt())? 
} let dst_idx = start_dst_idx + index * dst_right_len + right_i; dst[dst_idx] += src[ids_idx] } } } Ok(dst) } } struct IndexAdd<'a, I: IntDType> { ids: &'a [I], dim: usize, } impl<'a, I: IntDType> Map2 for IndexAdd<'a, I> { const OP: &'static str = "index-add"; // https://pytorch.org/docs/stable/generated/torch.Tensor.index_add_.html#torch.Tensor.index_add_ // v1, l1 -> self fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let max_idx = l1.dims()[dim]; let pre_dim = src_l.dims()[..dim].iter().product::<usize>(); let src_dim_sz = src_l.dims()[dim]; let post_dim = src_l.dims()[dim + 1..].iter().product::<usize>(); if dim == 0 { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } let src_idx = src_idx * post_dim; let dst_idx = dst_idx * post_dim; let src = &src[src_idx..src_idx + post_dim]; let dst = &mut dst[dst_idx..dst_idx + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } else { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } for pre_i in 0..pre_dim { let pre_src_i = (pre_i * src_dim_sz + src_idx) * post_dim; let pre_dst_i = (pre_i * max_idx + dst_idx) * post_dim; let src = &src[pre_src_i..pre_src_i + post_dim]; let dst = &mut dst[pre_dst_i..pre_dst_i + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } } Ok(dst) } } #[allow(clippy::too_many_arguments)] fn copy2d_<T: Copy>( src: &[T], dst: &mut [T], d1: usize, d2: usize, src_stride1: usize, dst_stride1: usize, src_offset: usize, dst_offset: usize, ) { for i1 in 0..d1 { let dst_idx = i1 * dst_stride1 + dst_offset; let src_idx = i1 * src_stride1 + src_offset; let dst = &mut dst[dst_idx..dst_idx + d2]; let src = &src[src_idx..src_idx + d2]; dst.copy_from_slice(src) } } fn copy_strided_src_<T: Copy>(src: &[T], dst: &mut [T], dst_offset: usize, src_l: &Layout) { match src_l.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let to_copy = (dst.len() - dst_offset).min(len); dst[dst_offset..dst_offset + to_copy] .copy_from_slice(&src[start_offset..start_offset + to_copy]) } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len: 1, } => { for (dst_index, src_index) in block_start_index.enumerate() { let dst_index = dst_index + dst_offset; if dst_index >= dst.len() { break; } dst[dst_index] = src[src_index] } } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut dst_index = dst_offset; for src_index in block_start_index { let next_dst_index = dst_index + block_len; if dst_index >= dst.len() { break; } let to_copy = usize::min(block_len, dst.len() - dst_index); dst[dst_index..dst_index + to_copy] .copy_from_slice(&src[src_index..src_index + to_copy]); dst_index = next_dst_index } } } } struct Conv1D<'a>(&'a crate::conv::ParamsConv1D); impl<'a> Map2 for Conv1D<'a> { const OP: &'static str = "conv1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = 
&inp[inp_l.start_offset()..]; let k = &k[k_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2) = crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); let dst_elems = p.c_out * l_out * p.b_size; // The output shape is [b_size, c_out, l_out] let dst = vec![T::zero(); dst_elems]; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; for b_idx in 0..p.b_size { for src_l in 0..p.l_in { for src_c_idx in 0..p.c_in { let inp_idx = b_idx * inp_s0 + src_c_idx * inp_s1 + src_l * inp_s2; inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in + src_c_idx] = inp[inp_idx] } } } for offset in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * l_out; let k_cont = (0..p.c_in) .map(|c_in_idx| k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * l_out; for dst_l in 0..l_out { let dst_idx = dst_idx + dst_l; let src_l = p.stride * dst_l + offset * p.dilation; if src_l < p.padding || src_l >= p.padding + p.l_in { continue; } let src_l = src_l - p.padding; let inp_cont = &inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } }) } Ok(dst) } } struct Im2Col1D { l_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col1D { fn l_out(&self, l: usize) -> usize { (l + 2 * self.padding - self.dilation * (self.l_k - 1) - 1) / self.stride + 1 } } impl Map1 for Im2Col1D { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { l_k, stride, dilation, padding, } = self; let (b, c, l) = layout.shape().dims3()?; let l_out = self.l_out(l); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * l_out * c * l_k]; let (src_s0, src_s1, src_s2) = { let s = layout.stride(); (s[0], s[1], s[2]) }; // TODO: provide specialized kernels for the common use cases. 
// - l_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * l_out * c * l_k; for l_idx in 0..l_out { let dst_idx = dst_idx + l_idx * c * l_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * l_k; let src_idx = c_idx * src_s1 + src_idx; for l_k_idx in 0..l_k { let src_l = l_idx * stride + l_k_idx * dilation; if padding != 0 && (src_l < padding || src_l >= l + padding) { continue; } let src_l = src_l - padding; let src_idx = src_idx + src_l * src_s2; let dst_idx = dst_idx + l_k_idx; dst[dst_idx] = src[src_idx] } } } } Ok(dst) } } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl Map1 for Im2Col { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { h_k, w_k, stride, dilation, padding, } = self; let (b, c, h, w) = layout.shape().dims4()?; let (h_out, w_out) = self.hw_out(h, w); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * h_out * w_out * c * h_k * w_k]; let (src_s0, src_s1, src_s2, src_s3) = { let s = layout.stride(); (s[0], s[1], s[2], s[3]) }; // TODO: provide specialized kernels for the common use cases. // - h_k = w_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * h_out * w_out * c * h_k * w_k; for h_idx in 0..h_out { let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k; for w_idx in 0..w_out { let dst_idx = dst_idx + w_idx * c * h_k * w_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * h_k * w_k; let src_idx = c_idx * src_s1 + src_idx; for h_k_idx in 0..h_k { let src_h = h_idx * stride + h_k_idx * dilation; if padding != 0 && (src_h < padding || src_h >= h + padding) { continue; } let src_h = src_h - padding; let src_idx = src_idx + src_h * src_s2; let dst_idx = dst_idx + h_k_idx * w_k; for w_k_idx in 0..w_k { let src_w = w_idx * stride + w_k_idx * dilation; if padding != 0 && (src_w < padding || src_w >= w + padding) { continue; } let src_w = src_w - padding; let src_idx = src_idx + src_w * src_s3; let dst_idx = dst_idx + w_k_idx; dst[dst_idx] = src[src_idx] } } } } } } Ok(dst) } } struct Col2Im1D { stride: usize, } impl Map1 for Col2Im1D { fn f<T: WithDType>(&self, col: &[T], l: &Layout) -> Result<Vec<T>> { let (b_size, l_in, c_out, k_size) = l.shape().dims4()?; let stride = self.stride; let l_out = (l_in - 1) * stride + k_size; let mut im = vec![T::zero(); b_size * c_out * l_out]; let (dst_s0, dst_s1) = (c_out * l_out, l_out); let (src_s0, src_s1, src_s2) = (c_out * k_size * l_in, c_out * k_size, k_size); for l_in_i in 0..l_in { for k_i in 0..k_size { let l_out_i = l_in_i * stride + k_i; for b_i in 0..b_size { for c_i in 0..c_out { let dst_idx = b_i * dst_s0 + c_i * dst_s1 + l_out_i; let src_idx = b_i * src_s0 + l_in_i * src_s1 + c_i * src_s2 + k_i; im[dst_idx] += col[src_idx] } } } } Ok(im) } } struct ConvTranspose1D<'a>(&'a crate::conv::ParamsConvTranspose1D); impl<'a> Map2 for ConvTranspose1D<'a> { const OP: &'static str = "conv_transpose1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let k = &k[k_l.start_offset()..]; let (inp_s0, inp_s1, 
inp_s2) = crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); // Output shape: [b_size, c_out, l_out]. let dst_elems = p.c_out * l_out * p.b_size; let dst = vec![T::zero(); dst_elems]; let dst_s0 = p.c_out * l_out; let dst_s1 = l_out; let dst_s2 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; let cont_s0 = p.l_in * p.c_in; let cont_s1 = p.c_in; for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + l_idx * inp_s2; let dst_idx = b_idx * cont_s0 + l_idx * cont_s1 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } for k_idx in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_idx * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { let out_idx = l_idx * p.stride + k_idx * p.dilation; if out_idx < p.padding { continue; } let out_idx = out_idx - p.padding; if out_idx < l_out { let inp_cont = &inp_cont[b_idx * cont_s0 + l_idx * cont_s1..]; let dst_idx = b_idx * dst_s0 + out_idx * dst_s2 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }) } Ok(dst) } } struct Conv2D<'a>(&'a crate::conv::ParamsConv2D); impl<'a> Map2 for Conv2D<'a> { const OP: &'static str = "conv2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; // TODO: Avoid making this copy if `inp` already has the appropriate layout. 
let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for offset_h in 0..p.k_h { for offset_w in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * out_w * out_h; let k_cont = (0..p.c_in) .map(|c_in_idx| { k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset_h * k_s2 + offset_w * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * out_h * out_w; for dst_h in 0..out_h { let dst_idx = dst_idx + dst_h * out_w; let src_h = p.stride * dst_h + offset_h * p.dilation; if src_h < p.padding || src_h >= p.i_h + p.padding { continue; } let src_h = src_h - p.padding; for dst_w in 0..out_w { let dst_idx = dst_idx + dst_w; let src_w = p.stride * dst_w + offset_w * p.dilation; if src_w < p.padding || src_w >= p.i_w + p.padding { continue; } let src_w = src_w - p.padding; let inp_cont = &inp_cont [b_idx * cont_s0 + src_h * cont_s1 + src_w * cont_s2..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }); } } Ok(dst) } } struct ConvTranspose2D<'a>(&'a crate::conv::ParamsConvTranspose2D); impl<'a> Map2 for ConvTranspose2D<'a> { const OP: &'static str = "conv_transpose2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; let dst_s0 = p.c_out * out_h * out_w; let dst_s1 = out_h * out_w; let dst_s2 = out_w; let dst_s3 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. 
let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for k_y in 0..p.k_h { for k_x in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| { k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_y * k_s2 + k_x * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for inp_y in 0..p.i_h { for inp_x in 0..p.i_w { let out_x = inp_x * p.stride + k_x * p.dilation; let out_y = inp_y * p.stride + k_y * p.dilation; if out_x < p.padding || out_y < p.padding { continue; } let out_x = out_x - p.padding; let out_y = out_y - p.padding; if out_x < out_w && out_y < out_h { let inp_cont = &inp_cont [b_idx * cont_s0 + inp_y * cont_s1 + inp_x * cont_s2..]; let dst_idx = b_idx * dst_s0 + out_y * dst_s2 + out_x * dst_s3 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot( inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in, ) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } } }) } } Ok(dst) } } struct MatMul((usize, usize, usize, usize)); impl MatMul { fn striding_error(&self, lhs_l: &Layout, rhs_l: &Layout, msg: &'static str) -> Error { Error::MatMulUnexpectedStriding(Box::new(crate::error::MatMulUnexpectedStriding { lhs_l: lhs_l.clone(), rhs_l: rhs_l.clone(), bmnk: self.0, msg, })) .bt() } } impl Map2 for MatMul { const OP: &'static str = "mat_mul"; #[cfg(all(not(feature = "mkl"), not(feature = "accelerate")))] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { use gemm::{gemm, Parallelism}; match T::DTYPE { DType::F16 | DType::F32 | DType::F64 => {} _ => Err(Error::UnsupportedDTypeForOp(T::DTYPE, "matmul").bt())?, } let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let lhs_cs = lhs_stride[rank - 1]; let lhs_rs = lhs_stride[rank - 2]; let rhs_cs = rhs_stride[rank - 1]; let rhs_rs = rhs_stride[rank - 2]; let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let dst_shape: Shape = (m, n).into(); let dst_strides = dst_shape.stride_contiguous(); let dst_rs = dst_strides[0]; let dst_cs = dst_strides[1]; let mut dst = vec![T::zero(); b * m * n]; let num_threads = crate::utils::get_num_threads(); let parallelism = if num_threads > 1 { Parallelism::Rayon(num_threads) } else { Parallelism::None }; for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { gemm( /* 
m: usize = */ m, /* n: usize = */ n, /* k: usize = */ k, /* dst: *mut T = */ dst_p.as_mut_ptr(), /* dst_cs: isize = */ dst_cs as isize, /* dst_rs: isize = */ dst_rs as isize, /* read_dst: bool = */ false, /* lhs: *const T = */ lhs_p.as_ptr(), /* lhs_cs: isize = */ lhs_cs as isize, /* lhs_rs: isize = */ lhs_rs as isize, /* rhs: *const T = */ rhs_p.as_ptr(), /* rhs_cs: isize = */ rhs_cs as isize, /* rhs_rs: isize = */ rhs_rs as isize, /* alpha: T = */ T::zero(), /* beta: T = */ T::one(), /* conj_dst: bool = */ false, /* conj_lhs: bool = */ false, /* conj_rhs: bool = */ false, parallelism, ) } } Ok(dst) } #[cfg(feature = "accelerate")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if rhs_m1 == 1 && rhs_m2 == n { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if lhs_m1 == 1 && lhs_m2 == k { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { crate::bail!("the accelerate backend does not support f16 matmul") } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } #[cfg(feature = "mkl")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if rhs_m1 == 1 && rhs_m2 == n { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if lhs_m1 == 1 && lhs_m2 == k { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f16; let b = lhs_p.as_ptr() as *const f16; let c = dst_p.as_mut_ptr() as *mut f16; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::hgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ f16::ONE, /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ f16::ZERO, /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } } fn elu<T: num_traits::Float>(v: T, alpha: T) -> T { if v.is_sign_positive() { v } else { (v.exp() - T::one()) * alpha } } impl CpuStorage { pub fn as_slice<D: WithDType>(&self) -> Result<&[D]> { D::cpu_storage_as_slice(self) } pub fn concat(storages: &[CpuStorage]) -> Result<CpuStorage> { let storage0 = &storages[0]; let s = match storage0 { Self::U8(_) => { let storages = storages .iter() .map(|s| match s { Self::U8(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U8(storages) } Self::U32(_) => { let storages = storages .iter() .map(|s| match s { Self::U32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U32(storages) } Self::I64(_) => { let storages = storages .iter() .map(|s| match s { Self::I64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::I64(storages) } Self::BF16(_) => { let storages = storages .iter() .map(|s| match s { Self::BF16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::BF16(storages) } Self::F16(_) => { let storages = storages .iter() .map(|s| match s { Self::F16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? 
.concat(); Self::F16(storages) } Self::F32(_) => { let storages = storages .iter() .map(|s| match s { Self::F32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F32(storages) } Self::F64(_) => { let storages = storages .iter() .map(|s| match s { Self::F64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F64(storages) } }; Ok(s) } } impl BackendStorage for CpuStorage { type Device = CpuDevice; fn dtype(&self) -> DType { match self { Self::U8(_) => DType::U8, Self::U32(_) => DType::U32, Self::I64(_) => DType::I64, Self::BF16(_) => DType::BF16, Self::F16(_) => DType::F16, Self::F32(_) => DType::F32, Self::F64(_) => DType::F64, } } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { // TODO: find a way around the quadratic number of cases below. match (self, dtype) { (Self::U8(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::U32(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::I64(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::BF16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::BF16(data)) } (Self::F16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v.to_f32())); Ok(Self::BF16(data)) } (Self::F32(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f32); Ok(Self::BF16(data)) } (Self::F64(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f64); Ok(Self::BF16(data)) } (Self::U8(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::U32(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::I64(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::BF16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v.to_f32())); Ok(Self::F16(data)) } (Self::F16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F16(data)) } (Self::F32(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f32); Ok(Self::F16(data)) } (Self::F64(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f64); Ok(Self::F16(data)) } (Self::U8(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::I64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::BF16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F32(data)) } (Self::F64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U8(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U8(data)) } (Self::BF16(storage), DType::U8) 
=> { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F16(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::F64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::I64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U8(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U32(data)) } (Self::I64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::BF16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::F64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U8(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::I64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::I64(data)) } (Self::BF16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::F64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U8(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::U32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::I64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::BF16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::F64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F64(data)) } } } fn reduce_op(&self, op: ReduceOp, layout: &Layout, reduce_dims: &[usize]) -> Result<Self> { match op { ReduceOp::Sum => { let src_dims = layout.dims(); let mut dst_dims = src_dims.to_vec(); for &dim in reduce_dims.iter() { dst_dims[dim] = 1; } let dst_shape = Shape::from(dst_dims); let mut reduce_dims = reduce_dims.to_vec(); // Sort the reduce_dims as they have to be processed from left to right when converting the // indexes. 
reduce_dims.sort(); let reduce_dims_and_stride: Vec<_> = reduce_dims .iter() .map(|&d| (src_dims[d], src_dims[d + 1..].iter().product::<usize>())) .collect(); ReduceSum { dst_shape: &dst_shape, reduce_dims: &reduce_dims, reduce_dims_and_stride, } .map(self, layout) } ReduceOp::Min | ReduceOp::ArgMin | ReduceOp::Max | ReduceOp::ArgMax => { let reduce_dim_index = match reduce_dims { [reduce_dim_index] => *reduce_dim_index, _ => { let op = match op { ReduceOp::Min => "min", ReduceOp::ArgMin => "argmin", ReduceOp::Max => "max", ReduceOp::ArgMax => "argmax", _ => unreachable!(), }; let dims = reduce_dims.to_vec(); Err(Error::OnlySingleDimension { op, dims })? } }; let (use_min, return_index) = match op { ReduceOp::Min => (true, false), ReduceOp::ArgMin => (true, true), ReduceOp::Max => (false, false), ReduceOp::ArgMax => (false, true), _ => unreachable!(), }; ReduceIndex { reduce_dim_index, use_min, return_index, } .map(self, layout) } } } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { Cmp(op).map(self, lhs_l, rhs, rhs_l) } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { Affine(mul, add).map(self, layout) } fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { AvgPool2D(kernel_size, stride).map(self, layout) } fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { MaxPool2D(kernel_size, stride).map(self, layout) } fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { UpsampleNearest1D(sz).map(self, layout) } fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { UpsampleNearest2D(h, w).map(self, layout) } fn powf(&self, layout: &Layout, e: f64) -> Result<Self> { use num_traits::Float; // TODO: Have some generic map for functions that apply on num_traits::Float elements. match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| v.powf(bf16::from_f64(e))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| v.powf(f16::from_f64(e))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| v.powf(e as f32)); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| v.powf(e)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "elu").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "elu").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "elu").bt()), } } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { // TODO: Have some generic map for functions that apply on num_traits::Float elements. 
match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| elu(v, bf16::from_f64(alpha))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| elu(v, f16::from_f64(alpha))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| elu(v, f32::from_f64(alpha))); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| elu(v, alpha)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "elu").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "elu").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "elu").bt()), } } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Self::BF16(storage) => { if B::BF16_VEC { let data = unary_map_vec(storage, layout, B::bf16, B::bf16_vec); Ok(Self::BF16(data)) } else { let data = unary_map(storage, layout, B::bf16); Ok(Self::BF16(data)) } } Self::F16(storage) => { if B::F16_VEC { let data = unary_map_vec(storage, layout, B::f16, B::f16_vec); Ok(Self::F16(data)) } else { let data = unary_map(storage, layout, B::f16); Ok(Self::F16(data)) } } Self::F32(storage) => { if B::F32_VEC { let data = unary_map_vec(storage, layout, B::f32, B::f32_vec); Ok(Self::F32(data)) } else { let data = unary_map(storage, layout, B::f32); Ok(Self::F32(data)) } } Self::F64(storage) => { if B::F64_VEC { let data = unary_map_vec(storage, layout, B::f64, B::f64_vec); Ok(Self::F64(data)) } else { let data = unary_map(storage, layout, B::f64); Ok(Self::F64(data)) } } Self::U8(storage) => { let data = unary_map(storage, layout, B::u8); Ok(Self::U8(data)) } Self::U32(storage) => { let data = unary_map(storage, layout, B::u32); Ok(Self::U32(data)) } Self::I64(storage) => { let data = unary_map(storage, layout, B::i64); Ok(Self::I64(data)) } } } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { match (self, rhs) { (Self::BF16(lhs), Self::BF16(rhs)) => { let data = if B::BF16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::bf16, B::bf16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::bf16) }; Ok(Self::BF16(data)) } (Self::F16(lhs), Self::F16(rhs)) => { let data = if B::F16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f16, B::f16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f16) }; Ok(Self::F16(data)) } (Self::F32(lhs), Self::F32(rhs)) => { let data = if B::F32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f32, B::f32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f32) }; Ok(Self::F32(data)) } (Self::F64(lhs), Self::F64(rhs)) => { let data = if B::F64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f64, B::f64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f64) }; Ok(Self::F64(data)) } (Self::U32(lhs), Self::U32(rhs)) => { let data = if B::U32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u32, B::u32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u32) }; Ok(Self::U32(data)) } (Self::I64(lhs), Self::I64(rhs)) => { let data = if B::I64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::i64, B::i64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::i64) }; Ok(Self::I64(data)) } (Self::U8(lhs), Self::U8(rhs)) => { let data = if B::U8_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u8, B::u8_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u8) }; Ok(Self::U8(data)) } _ => { // This should be covered by the dtype check above. 
Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: rhs.dtype(), op: B::NAME, } .bt()) } } } fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { match (self, dst) { (Self::U8(src), Self::U8(dst)) => copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o), (Self::U32(src), Self::U32(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::I64(src), Self::I64(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::BF16(src), Self::BF16(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F16(src), Self::F16(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F32(src), Self::F32(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F64(src), Self::F64(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (_, dst) => { return Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: dst.dtype(), op: "copy2d", } .bt()); } } Ok(()) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { match (self, dst) { (Self::U8(src), Self::U8(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::U32(src), Self::U32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::I64(src), Self::I64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::BF16(src), Self::BF16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F16(src), Self::F16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F32(src), Self::F32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F64(src), Self::F64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (_, dst) => { // This should be covered by the dtype check above. return Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: dst.dtype(), op: "copy_strided", } .bt()); } } Ok(()) } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { match self { Self::U8(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::U32(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::I64(pred) => WCond(pred, layout).map(t, t_l, f, f_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "where-cond")), } } fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { if !USE_IM2COL_CONV1D { return Conv1D(params).map(self, l, kernel, kernel_l); } let op = Im2Col1D { l_k: params.k_size, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let l_out = params.l_out(); let k = op.l_k * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? 
}; let res_l = Layout::contiguous((b, l_out, params.c_out)).transpose(1, 2)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { let can_use_col2im = kernel_l.is_contiguous() && params.dilation == 1 && params.padding == 0 && params.output_padding == 0; if USE_IM2COL_CONV1D_TR && can_use_col2im { let (b_size, c_in, l_in) = l.shape().dims3()?; let (c_in2, c_out, k_size) = kernel_l.shape().dims3()?; if !kernel_l.is_contiguous() { crate::bail!( "convtr1d: the second argument (kernel) has to be contiguous {kernel_l:?}" ) } if c_in != c_in2 { crate::bail!( "convtr1d: shape mismatch on c_in {:?} {:?}", l.shape(), kernel_l.shape() ) } let col = { // This merges the last two dimensions of the kernel together. let kernel_l_mm = Layout::new( (b_size, c_in, k_size * c_out).into(), vec![0, k_size * c_out, 1], kernel_l.start_offset(), ); self.matmul( kernel, ( b_size, /* m */ l_in, /* n */ c_out * k_size, /* k */ c_in, ), &l.transpose(1, 2)?, &kernel_l_mm, )? }; let col_l = Layout::contiguous((b_size, l_in, c_out, k_size)); Col2Im1D { stride: params.stride, } .map(&col, &col_l) } else { ConvTranspose1D(params).map(self, l, kernel, kernel_l) } } fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { if !USE_IM2COL_CONV2D { return Conv2D(params).map(self, l, kernel, kernel_l); } let op = Im2Col { h_k: params.k_h, w_k: params.k_w, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let (h_out, w_out) = (params.out_h(), params.out_w()); let k = op.h_k * op.w_k * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, params.c_out)) .transpose(1, 2)? 
.transpose(1, 3)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { ConvTranspose2D(params).map(self, l, kernel, kernel_l) } fn index_select(&self, ids: &Self, l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::U32(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::I64(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-select").bt()), } } fn gather(&self, l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::U32(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::I64(ids) => Gather { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "gather").bt()), } } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::U32(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::I64(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "scatter-add").bt()), } } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::U32(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::I64(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-add").bt()), } } fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { MatMul(bmnk).map(self, lhs_l, rhs, rhs_l) } fn device(&self) -> &Self::Device { &CpuDevice } fn try_clone(&self, _: &Layout) -> Result<Self> { Ok(self.clone()) } fn to_cpu_storage(&self) -> Result<CpuStorage> { Ok(self.clone()) } } impl BackendDevice for CpuDevice { type Storage = CpuStorage; fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Cpu } fn same_device(&self, _: &Self) -> bool { true } fn storage_from_cpu_storage(&self, s: &CpuStorage) -> Result<Self::Storage> { Ok(s.clone()) } fn new(_: usize) -> Result<Self> { Ok(Self) } fn set_seed(&self, _seed: u64) -> Result<()> { crate::bail!("cannot seed the CPU rng with set_seed") } fn rand_uniform(&self, shape: &Shape, dtype: DType, min: f64, max: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_uniform").bt()) } DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let uniform = 
rand::distributions::Uniform::new(bf16::from_f64(min), bf16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<bf16, _>(uniform)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(f16::from_f64(min), f16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<f16, _>(uniform)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min as f32, max as f32); for _i in 0..elem_count { data.push(rng.sample::<f32, _>(uniform)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min, max); for _i in 0..elem_count { data.push(rng.sample::<f64, _>(uniform)) } Ok(CpuStorage::F64(data)) } } } fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_normal").bt()) } DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(bf16::from_f64(mean), bf16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(f16::from_f64(mean), f16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean as f32, std as f32).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean, std).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F64(data)) } } } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![1u8; elem_count]), DType::U32 => CpuStorage::U32(vec![1u32; elem_count]), DType::I64 => CpuStorage::I64(vec![1i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ONE; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ONE; elem_count]), DType::F32 => CpuStorage::F32(vec![1f32; elem_count]), DType::F64 => CpuStorage::F64(vec![1f64; elem_count]), }; Ok(storage) } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![0u8; elem_count]), DType::U32 => CpuStorage::U32(vec![0u32; elem_count]), DType::I64 => CpuStorage::I64(vec![0i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ZERO; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ZERO; elem_count]), DType::F32 => CpuStorage::F32(vec![0f32; elem_count]), DType::F64 => CpuStorage::F64(vec![0f64; elem_count]), }; Ok(storage) } } #[macro_export] macro_rules! map_dtype { ($name:expr, $storage:ident, $fn:expr, ($($dtypes:ident),+)) => { match $storage { $(CpuStorage::$dtypes(__e) => CpuStorage::$dtypes($fn(__e)),)* s => Err(Error::UnsupportedDTypeForOp(s.dtype(), $name).bt())?, } }; }
candle/candle-core/src/cpu_backend.rs/0
{ "file_path": "candle/candle-core/src/cpu_backend.rs", "repo_id": "candle", "token_count": 71869 }
12
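A minimal usage sketch to accompany the cpu_backend.rs chunk above (not part of the original file): it exercises the CpuStorage to_dtype and reduce_op paths through the public Tensor API. It assumes the crate is pulled in as candle_core; the tensor values are arbitrary.

use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    // Backed by CpuStorage::F32 on the CPU device.
    let t = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &Device::Cpu)?;
    // Dispatches to CpuStorage::to_dtype, i.e. the (F32, F64) arm of the large match.
    let t = t.to_dtype(DType::F64)?;
    // Dispatches to reduce_op(ReduceOp::Sum, ..) with reduce_dims = [1].
    let row_sums = t.sum(1)?;
    println!("{row_sums}");
    Ok(())
}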
// Just enough pickle support to be able to read PyTorch checkpoints. // This hardcodes objects that are required for tensor reading, we may want to make this a bit more // composable/tensor agnostic at some point. use crate::{DType, Error as E, Layout, Result, Tensor}; use byteorder::{LittleEndian, ReadBytesExt}; use std::collections::HashMap; use std::io::BufRead; const VERBOSE: bool = false; // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/ #[repr(u8)] #[derive(Debug, Eq, PartialEq, Clone)] pub enum OpCode { // https://github.com/python/cpython/blob/ed25f097160b5cbb0c9a1f9a746d2f1bbc96515a/Lib/pickletools.py#L2123 Proto = 0x80, Global = b'c', BinPut = b'q', LongBinPut = b'r', EmptyTuple = b')', Reduce = b'R', Mark = b'(', BinUnicode = b'X', BinInt = b'J', Tuple = b't', BinPersId = b'Q', BinInt1 = b'K', BinInt2 = b'M', Tuple1 = 0x85, Tuple2 = 0x86, Tuple3 = 0x87, NewTrue = 0x88, NewFalse = 0x89, None = b'N', BinGet = b'h', LongBinGet = b'j', SetItem = b's', SetItems = b'u', EmptyDict = b'}', Dict = b'd', Build = b'b', Stop = b'.', NewObj = 0x81, EmptyList = b']', BinFloat = b'G', Append = b'a', Appends = b'e', } // Avoid using FromPrimitive so as not to drag another dependency. impl TryFrom<u8> for OpCode { type Error = u8; fn try_from(value: u8) -> std::result::Result<Self, Self::Error> { match value { 0x80 => Ok(Self::Proto), b'c' => Ok(Self::Global), b'q' => Ok(Self::BinPut), b'r' => Ok(Self::LongBinPut), b')' => Ok(Self::EmptyTuple), b'R' => Ok(Self::Reduce), b'(' => Ok(Self::Mark), b'X' => Ok(Self::BinUnicode), b'J' => Ok(Self::BinInt), b't' => Ok(Self::Tuple), b'Q' => Ok(Self::BinPersId), b'K' => Ok(Self::BinInt1), b'M' => Ok(Self::BinInt2), b'N' => Ok(Self::None), 0x85 => Ok(Self::Tuple1), 0x86 => Ok(Self::Tuple2), 0x87 => Ok(Self::Tuple3), 0x88 => Ok(Self::NewTrue), 0x89 => Ok(Self::NewFalse), b'h' => Ok(Self::BinGet), b'j' => Ok(Self::LongBinGet), b's' => Ok(Self::SetItem), b'u' => Ok(Self::SetItems), b'}' => Ok(Self::EmptyDict), b'd' => Ok(Self::EmptyDict), b'b' => Ok(Self::Build), b'.' 
=> Ok(Self::Stop), 0x81 => Ok(Self::NewObj), b']' => Ok(Self::EmptyList), b'G' => Ok(Self::BinFloat), b'a' => Ok(Self::Append), b'e' => Ok(Self::Appends), value => Err(value), } } } fn read_to_newline<R: BufRead>(r: &mut R) -> Result<Vec<u8>> { let mut data: Vec<u8> = Vec::with_capacity(32); r.read_until(b'\n', &mut data)?; data.pop(); if data.last() == Some(&b'\r') { data.pop(); } Ok(data) } #[derive(Debug, Clone, PartialEq)] pub enum Object { Class { module_name: String, class_name: String, }, Int(i32), Float(f64), Unicode(String), Bool(bool), None, Tuple(Vec<Object>), List(Vec<Object>), Mark, Dict(Vec<(Object, Object)>), Reduce { callable: Box<Object>, args: Box<Object>, }, Build { callable: Box<Object>, args: Box<Object>, }, PersistentLoad(Box<Object>), } type OResult<T> = std::result::Result<T, Object>; impl Object { pub fn unicode(self) -> OResult<String> { match self { Self::Unicode(t) => Ok(t), _ => Err(self), } } pub fn reduce(self) -> OResult<(Self, Self)> { match self { Self::Reduce { callable, args } => Ok((*callable, *args)), _ => Err(self), } } pub fn none(self) -> OResult<()> { match self { Self::None => Ok(()), _ => Err(self), } } pub fn persistent_load(self) -> OResult<Self> { match self { Self::PersistentLoad(t) => Ok(*t), _ => Err(self), } } pub fn bool(self) -> OResult<bool> { match self { Self::Bool(t) => Ok(t), _ => Err(self), } } pub fn int(self) -> OResult<i32> { match self { Self::Int(t) => Ok(t), _ => Err(self), } } pub fn tuple(self) -> OResult<Vec<Self>> { match self { Self::Tuple(t) => Ok(t), _ => Err(self), } } pub fn dict(self) -> OResult<Vec<(Self, Self)>> { match self { Self::Dict(t) => Ok(t), _ => Err(self), } } pub fn class(self) -> OResult<(String, String)> { match self { Self::Class { module_name, class_name, } => Ok((module_name, class_name)), _ => Err(self), } } pub fn into_tensor_info( self, name: Self, dir_name: &std::path::Path, ) -> Result<Option<TensorInfo>> { let name = match name.unicode() { Ok(name) => name, Err(_) => return Ok(None), }; let (callable, args) = match self.reduce() { Ok(callable_args) => callable_args, _ => return Ok(None), }; let (callable, args) = match callable { Object::Class { module_name, class_name, } if module_name == "torch._tensor" && class_name == "_rebuild_from_type_v2" => { let mut args = args.tuple()?; let callable = args.remove(0); let args = args.remove(1); (callable, args) } Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_parameter" => { let mut args = args.tuple()?; args.remove(0).reduce()? 
} _ => (callable, args), }; match callable { Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_tensor_v2" => {} _ => return Ok(None), }; let (layout, dtype, file_path, storage_size) = rebuild_args(args)?; Ok(Some(TensorInfo { name, dtype, layout, path: format!("{}/{}", dir_name.to_string_lossy(), file_path), storage_size, })) } } impl TryFrom<Object> for String { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Unicode(s) => Ok(s), other => Err(other), } } } impl TryFrom<Object> for usize { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Int(s) if s >= 0 => Ok(s as usize), other => Err(other), } } } impl<T: TryFrom<Object, Error = Object>> TryFrom<Object> for Vec<T> { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Tuple(values) => { // This does not return the appropriate value in the error case but instead return // the object related to the first error. values .into_iter() .map(|v| T::try_from(v)) .collect::<std::result::Result<Vec<T>, Self::Error>>() } other => Err(other), } } } #[derive(Debug)] pub struct Stack { stack: Vec<Object>, memo: HashMap<u32, Object>, } impl Stack { pub fn empty() -> Self { Self { stack: Vec::with_capacity(512), memo: HashMap::new(), } } pub fn stack(&self) -> &[Object] { self.stack.as_slice() } pub fn read_loop<R: BufRead>(&mut self, r: &mut R) -> Result<()> { loop { if self.read(r)? { break; } } Ok(()) } pub fn finalize(mut self) -> Result<Object> { self.pop() } fn push(&mut self, obj: Object) { self.stack.push(obj) } fn pop(&mut self) -> Result<Object> { match self.stack.pop() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/#Pickle.OpCodes.BUILD fn build(&mut self) -> Result<()> { let args = self.pop()?; let obj = self.pop()?; let obj = match (obj, args) { (Object::Dict(mut obj), Object::Dict(mut args)) => { obj.append(&mut args); Object::Dict(obj) } (obj, args) => Object::Build { callable: Box::new(obj), args: Box::new(args), }, }; self.push(obj); Ok(()) } fn reduce(&mut self) -> Result<()> { let args = self.pop()?; let callable = self.pop()?; #[allow(clippy::single_match)] let reduced = match &callable { Object::Class { module_name, class_name, } => { if module_name == "collections" && (class_name == "OrderedDict" || class_name == "defaultdict") { // TODO: have a separate ordered dict and a separate default dict. Some(Object::Dict(vec![])) } else { None } } _ => None, }; let reduced = reduced.unwrap_or_else(|| Object::Reduce { callable: Box::new(callable), args: Box::new(args), }); self.push(reduced); Ok(()) } fn last(&mut self) -> Result<&mut Object> { match self.stack.last_mut() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } fn memo_get(&self, id: u32) -> Result<Object> { match self.memo.get(&id) { None => crate::bail!("missing object in memo {id}"), Some(obj) => { // Maybe we should use refcounting rather than doing potential large clones here. 
Ok(obj.clone()) } } } fn memo_put(&mut self, id: u32) -> Result<()> { let obj = self.last()?.clone(); self.memo.insert(id, obj); Ok(()) } fn persistent_load(&self, id: Object) -> Result<Object> { Ok(Object::PersistentLoad(Box::new(id))) } fn new_obj(&self, class: Object, args: Object) -> Result<Object> { Ok(Object::Reduce { callable: Box::new(class), args: Box::new(args), }) } fn pop_to_marker(&mut self) -> Result<Vec<Object>> { let mut mark_idx = None; for (idx, obj) in self.stack.iter().enumerate().rev() { if obj == &Object::Mark { mark_idx = Some(idx); break; } } match mark_idx { Some(mark_idx) => { let objs = self.stack.split_off(mark_idx + 1); self.stack.pop(); Ok(objs) } None => { crate::bail!("marker object not found") } } } pub fn read<R: BufRead>(&mut self, r: &mut R) -> Result<bool> { let op_code = match OpCode::try_from(r.read_u8()?) { Ok(op_code) => op_code, Err(op_code) => { crate::bail!("unknown op-code {op_code}") } }; // println!("op: {op_code:?}"); // println!("{:?}", self.stack); match op_code { OpCode::Proto => { let version = r.read_u8()?; if VERBOSE { println!("proto {version}"); } } OpCode::Global => { let module_name = read_to_newline(r)?; let class_name = read_to_newline(r)?; let module_name = String::from_utf8_lossy(&module_name).to_string(); let class_name = String::from_utf8_lossy(&class_name).to_string(); self.push(Object::Class { module_name, class_name, }) } OpCode::BinInt1 => { let arg = r.read_u8()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt2 => { let arg = r.read_u16::<LittleEndian>()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt => { let arg = r.read_i32::<LittleEndian>()?; self.push(Object::Int(arg)) } OpCode::BinFloat => { // Somehow floats are encoded using BigEndian whereas int types use LittleEndian. 
// https://github.com/python/cpython/blob/0c80da4c14d904a367968955544dd6ae58c8101c/Lib/pickletools.py#L855 // https://github.com/pytorch/pytorch/blob/372d078f361e726bb4ac0884ac334b04c58179ef/torch/_weights_only_unpickler.py#L243 let arg = r.read_f64::<byteorder::BigEndian>()?; self.push(Object::Float(arg)) } OpCode::BinUnicode => { let len = r.read_u32::<LittleEndian>()?; let mut data = vec![0u8; len as usize]; r.read_exact(&mut data)?; let data = String::from_utf8(data).map_err(E::wrap)?; self.push(Object::Unicode(data)) } OpCode::BinPersId => { let id = self.pop()?; let obj = self.persistent_load(id)?; self.push(obj) } OpCode::Tuple => { let objs = self.pop_to_marker()?; self.push(Object::Tuple(objs)) } OpCode::Tuple1 => { let obj = self.pop()?; self.push(Object::Tuple(vec![obj])) } OpCode::Tuple2 => { let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2])) } OpCode::Tuple3 => { let obj3 = self.pop()?; let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2, obj3])) } OpCode::NewTrue => self.push(Object::Bool(true)), OpCode::NewFalse => self.push(Object::Bool(false)), OpCode::Append => { let value = self.pop()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.push(value) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::Appends => { let objs = self.pop_to_marker()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.extend(objs) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::SetItem => { let value = self.pop()?; let key = self.pop()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { d.push((key, value)) } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::SetItems => { let mut objs = self.pop_to_marker()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); d.push((key, value)) } } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::None => self.push(Object::None), OpCode::Stop => { return Ok(true); } OpCode::Build => self.build()?, OpCode::EmptyDict => self.push(Object::Dict(vec![])), OpCode::Dict => { let mut objs = self.pop_to_marker()?; let mut pydict = vec![]; if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); pydict.push((key, value)) } self.push(Object::Dict(pydict)) } OpCode::Mark => self.push(Object::Mark), OpCode::Reduce => self.reduce()?, OpCode::EmptyTuple => self.push(Object::Tuple(vec![])), OpCode::EmptyList => self.push(Object::List(vec![])), OpCode::BinGet => { let arg = r.read_u8()?; let obj = self.memo_get(arg as u32)?; self.push(obj) } OpCode::LongBinGet => { let arg = r.read_u32::<LittleEndian>()?; let obj = self.memo_get(arg)?; self.push(obj) } OpCode::BinPut => { let arg = r.read_u8()?; self.memo_put(arg as u32)? } OpCode::LongBinPut => { let arg = r.read_u32::<LittleEndian>()?; self.memo_put(arg)? 
} OpCode::NewObj => { let args = self.pop()?; let class = self.pop()?; let obj = self.new_obj(class, args)?; self.push(obj) } } Ok(false) } } impl From<Object> for E { fn from(value: Object) -> Self { E::Msg(format!("conversion error on {value:?}")) } } // https://github.com/pytorch/pytorch/blob/4eac43d046ded0f0a5a5fa8db03eb40f45bf656e/torch/_utils.py#L198 // Arguments: storage, storage_offset, size, stride, requires_grad, backward_hooks fn rebuild_args(args: Object) -> Result<(Layout, DType, String, usize)> { let mut args = args.tuple()?; let stride = Vec::<usize>::try_from(args.remove(3))?; let size = Vec::<usize>::try_from(args.remove(2))?; let offset = args.remove(1).int()? as usize; let storage = args.remove(0).persistent_load()?; let mut storage = storage.tuple()?; let storage_size = storage.remove(4).int()? as usize; let path = storage.remove(2).unicode()?; let (_module_name, class_name) = storage.remove(1).class()?; let dtype = match class_name.as_str() { "FloatStorage" => DType::F32, "DoubleStorage" => DType::F64, "HalfStorage" => DType::F16, "BFloat16Storage" => DType::BF16, "ByteStorage" => DType::U8, "LongStorage" => DType::I64, other => { crate::bail!("unsupported storage type {other}") } }; let layout = Layout::new(crate::Shape::from(size), stride, offset); Ok((layout, dtype, path, storage_size)) } #[derive(Debug, Clone)] pub struct TensorInfo { pub name: String, pub dtype: DType, pub layout: Layout, pub path: String, pub storage_size: usize, } /// Read the tensor info from a .pth file. /// /// # Arguments /// * `file` - The path to the .pth file. /// * `verbose` - Whether to print debug information. /// * `key` - Optional key to retrieve `state_dict` from the pth file. pub fn read_pth_tensor_info<P: AsRef<std::path::Path>>( file: P, verbose: bool, key: Option<&str>, ) -> Result<Vec<TensorInfo>> { let file = std::fs::File::open(file)?; let zip_reader = std::io::BufReader::new(file); let mut zip = zip::ZipArchive::new(zip_reader)?; let zip_file_names = zip .file_names() .map(|f| f.to_string()) .collect::<Vec<String>>(); let mut tensor_infos = vec![]; for file_name in zip_file_names.iter() { if !file_name.ends_with("data.pkl") { continue; } let dir_name = std::path::PathBuf::from(file_name.strip_suffix(".pkl").unwrap()); let reader = zip.by_name(file_name)?; let mut reader = std::io::BufReader::new(reader); let mut stack = Stack::empty(); stack.read_loop(&mut reader)?; let obj = stack.finalize()?; if VERBOSE || verbose { println!("{obj:#?}"); } let obj = match obj { Object::Build { callable, args } => match *callable { Object::Reduce { callable, args: _ } => match *callable { Object::Class { module_name, class_name, } if module_name == "__torch__" && class_name == "Module" => *args, _ => continue, }, _ => continue, }, obj => obj, }; // If key is provided, then we need to extract the state_dict from the object. let obj = if let Some(key) = key { if let Object::Dict(key_values) = obj { key_values .into_iter() .find(|(k, _)| *k == Object::Unicode(key.to_owned())) .map(|(_, v)| v) .ok_or_else(|| E::Msg(format!("key {key} not found")))? } else { obj } } else { obj }; // If the object is a dict, then we can extract the tensor info from it. // NOTE: We are assuming that the `obj` is state_dict by this stage. 
if let Object::Dict(key_values) = obj { for (name, value) in key_values.into_iter() { match value.into_tensor_info(name, &dir_name) { Ok(Some(tensor_info)) => tensor_infos.push(tensor_info), Ok(None) => {} Err(err) => eprintln!("skipping: {err:?}"), } } } } Ok(tensor_infos) } /// Lazy tensor loader. pub struct PthTensors { tensor_infos: HashMap<String, TensorInfo>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. } impl PthTensors { pub fn new<P: AsRef<std::path::Path>>(path: P, key: Option<&str>) -> Result<Self> { let tensor_infos = read_pth_tensor_info(path.as_ref(), false, key)?; let tensor_infos = tensor_infos .into_iter() .map(|ti| (ti.name.to_string(), ti)) .collect(); let path = path.as_ref().to_owned(); Ok(Self { tensor_infos, path }) } pub fn tensor_infos(&self) -> &HashMap<String, TensorInfo> { &self.tensor_infos } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { use std::io::Read; let tensor_info = match self.tensor_infos.get(name) { None => return Ok(None), Some(tensor_info) => tensor_info, }; // We hope that the file has not changed since first reading it. let zip_reader = std::io::BufReader::new(std::fs::File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_name(&tensor_info.path)?; let is_fortran_contiguous = tensor_info.layout.is_fortran_contiguous(); let rank = tensor_info.layout.shape().rank(); // Reading the data is a bit tricky as it can be strided, for now only support the basic // case and when the tensor is fortran contiguous. if !tensor_info.layout.is_contiguous() && !is_fortran_contiguous { crate::bail!( "cannot retrieve non-contiguous tensors {:?}", tensor_info.layout ) } let start_offset = tensor_info.layout.start_offset(); if start_offset > 0 { std::io::copy( &mut reader.by_ref().take(start_offset as u64), &mut std::io::sink(), )?; } let tensor = Tensor::from_reader( tensor_info.layout.shape().clone(), tensor_info.dtype, &mut reader, )?; if rank > 1 && is_fortran_contiguous { // Reverse the shape, e.g. Shape(2, 3, 4) -> Shape(4, 3, 2) let shape_reversed: Vec<_> = tensor_info.layout.dims().iter().rev().cloned().collect(); let tensor = tensor.reshape(shape_reversed)?; // Permute (transpose) the dimensions, e.g. Shape(4, 3, 2) -> Shape(2, 3, 4) let dim_indeces_reversed: Vec<_> = (0..rank).rev().collect(); let tensor = tensor.permute(dim_indeces_reversed)?; Ok(Some(tensor)) } else { Ok(Some(tensor)) } } } /// Read all the tensors from a PyTorch pth file with a given key. /// /// # Arguments /// * `path` - Path to the pth file. /// * `key` - Optional key to retrieve `state_dict` from the pth file. Sometimes the pth file /// contains multiple objects and the state_dict is the one we are interested in. pub fn read_all_with_key<P: AsRef<std::path::Path>>( path: P, key: Option<&str>, ) -> Result<Vec<(String, Tensor)>> { let pth = PthTensors::new(path, key)?; let tensor_names = pth.tensor_infos.keys(); let mut tensors = Vec::with_capacity(tensor_names.len()); for name in tensor_names { if let Some(tensor) = pth.get(name)? { tensors.push((name.to_string(), tensor)) } } Ok(tensors) } /// Read all the tensors from a PyTorch pth file. /// /// # Arguments /// * `path` - Path to the pth file. pub fn read_all<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<(String, Tensor)>> { read_all_with_key(path, None) }
candle/candle-core/src/pickle.rs/0
{ "file_path": "candle/candle-core/src/pickle.rs", "repo_id": "candle", "token_count": 14306 }
13
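A short, hedged sketch of how the pickle reader above is typically driven (the "model.pth" path is a placeholder, and the crate is assumed to be imported as candle_core): read_all opens the zip archive, decodes the embedded data.pkl with the opcode stack machine and returns the named tensors.

use candle_core::pickle;

fn main() -> candle_core::Result<()> {
    let tensors = pickle::read_all("model.pth")?;
    for (name, tensor) in tensors.iter() {
        // Each entry was rebuilt from a _rebuild_tensor_v2 call recorded in the pickle.
        println!("{name}: {:?} {:?}", tensor.dtype(), tensor.shape());
    }
    Ok(())
}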
use crate::backend::BackendStorage; use crate::op::{self, CmpOp, CustomOp1, CustomOp2, CustomOp3, ReduceOp}; use crate::{CpuStorage, CudaStorage, DType, Device, Error, Layout, MetalStorage, Result, Shape}; // We do not want to implement Clone on Storage as cloning may fail because of // out of memory. Instead try_clone should be used. #[derive(Debug)] pub enum Storage { Cpu(CpuStorage), Cuda(CudaStorage), Metal(MetalStorage), } impl Storage { pub fn try_clone(&self, layout: &Layout) -> Result<Self> { match self { Self::Cpu(storage) => Ok(Self::Cpu(storage.clone())), Self::Cuda(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Metal(storage)) } } } pub fn device(&self) -> Device { match self { Self::Cpu(_) => Device::Cpu, Self::Cuda(storage) => Device::Cuda(storage.device().clone()), Self::Metal(storage) => Device::Metal(storage.device().clone()), } } pub fn dtype(&self) -> DType { match self { Self::Cpu(storage) => storage.dtype(), Self::Cuda(storage) => storage.dtype(), Self::Metal(storage) => storage.dtype(), } } pub(crate) fn same_device(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.device().location(); let rhs = rhs.device().location(); if lhs != rhs { Err(Error::DeviceMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn same_dtype(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.dtype(); let rhs = rhs.dtype(); if lhs != rhs { Err(Error::DTypeMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Metal(storage)) } } } pub(crate) fn powf(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn cmp( &self, op: CmpOp, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "cmp")?; self.same_dtype(rhs, "cmp")?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. 
Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "cmp", } .bt()) } } } pub(crate) fn reduce_op(&self, op: ReduceOp, layout: &Layout, s: &[usize]) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Metal(storage)) } } } pub(crate) fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Metal(storage)) } } } pub(crate) fn apply_op1(&self, l: &Layout, c: &dyn CustomOp1) -> Result<(Self, Shape)> { match self { Self::Cpu(storage) => { let (storage, shape) = c.cpu_fwd(storage, l)?; Ok((Self::Cpu(storage), shape)) } Self::Cuda(storage) => { let (storage, shape) = c.cuda_fwd(storage, l)?; Ok((Self::Cuda(storage), shape)) } Self::Metal(storage) => { let (storage, shape) = c.metal_fwd(storage, l)?; Ok((Self::Metal(storage), shape)) } } } pub(crate) fn apply_op2( &self, l1: &Layout, t2: &Self, l2: &Layout, c: &dyn CustomOp2, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; match (self, t2) { (Self::Cpu(s1), Self::Cpu(s2)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn apply_op3( &self, l1: &Layout, t2: &Self, l2: &Layout, t3: &Self, l3: &Layout, c: &dyn CustomOp3, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; self.same_device(t3, c.name())?; match (self, t2, t3) { (Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn unary_impl<B: op::UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Metal(storage)) } } } pub(crate) fn binary_impl<B: op::BinaryOpT>( &self, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, B::NAME)?; self.same_dtype(rhs, B::NAME)?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; 
Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: B::NAME, } .bt()) } } } pub(crate) fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { self.same_device(kernel, "conv1d")?; self.same_dtype(kernel, "conv1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv1d", } .bt()), } } pub(crate) fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { self.same_device(kernel, "conv-transpose1d")?; self.same_dtype(kernel, "conv-transpose1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv-transpose1d", } .bt()), } } pub(crate) fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { self.same_device(kernel, "conv2d")?; self.same_dtype(kernel, "conv2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv2d", } .bt()), } } pub(crate) fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { self.same_device(kernel, "conv_transpose2d")?; self.same_dtype(kernel, "conv_transpose2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv_transpose2d", } .bt()), } } pub(crate) fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; 
Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Metal(storage)) } } } pub(crate) fn where_cond( &self, layout: &Layout, t: &Self, layout_t: &Layout, f: &Self, layout_f: &Layout, ) -> Result<Self> { self.same_device(t, "where")?; self.same_device(f, "where")?; t.same_dtype(f, "where")?; match (self, t, f) { (Storage::Cpu(cond), Storage::Cpu(t), Storage::Cpu(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cpu(storage)) } (Self::Cuda(cond), Self::Cuda(t), Self::Cuda(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cuda(storage)) } (Self::Metal(cond), Self::Metal(t), Self::Metal(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Metal(storage)) } (_, lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "where", } .bt()), } } pub(crate) fn gather( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; match (self, indexes) { (Self::Cpu(s), Self::Cpu(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn scatter_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "scatter-add")?; self.same_device(source, "scatter-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; 
Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; self.same_device(source, "index-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_select( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(rhs, "index-select")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "index-select", } .bt()), } } pub(crate) fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "matmul")?; self.same_dtype(rhs, "matmul")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "matmul", } .bt()), } } // self, the source can be strided whereas dst is contiguous. pub(crate) fn copy_strided_src( &self, dst: &mut Self, dst_offset: usize, src_l: &Layout, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy_strided_src(dst, dst_offset, src_l), (Self::Cuda(src), Self::Cuda(dst)) => Ok(src.copy_strided_src(dst, dst_offset, src_l)?), (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy_strided_src(dst, dst_offset, src_l)?) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy", } .bt()), } } #[allow(clippy::too_many_arguments)] pub(crate) fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o), (Self::Cuda(src), Self::Cuda(dst)) => { Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?) } (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?) 
} (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy2d", } .bt()), } } }
candle/candle-core/src/storage.rs/0
{ "file_path": "candle/candle-core/src/storage.rs", "repo_id": "candle", "token_count": 14445 }
14
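The storage.rs chunk above dispatches every tensor operation over a CPU/CUDA/Metal enum and returns a `DeviceMismatchBinaryOp` error when operands live on different backends. Below is a minimal, self-contained sketch of that dispatch pattern; the toy types (`ToyStorage`, `ToyError`, `Location`) are invented for illustration and are not part of candle's API.

```rust
// Toy reimplementation of the enum-dispatch pattern used in storage.rs.
#[derive(Debug, Clone, Copy)]
enum Location {
    Cpu,
    Cuda,
}

#[derive(Debug)]
enum ToyError {
    DeviceMismatch { lhs: Location, rhs: Location, op: &'static str },
}

#[derive(Debug)]
enum ToyStorage {
    Cpu(Vec<f32>),
    Cuda(Vec<f32>), // stand-in: pretend this buffer lives on a GPU
}

impl ToyStorage {
    fn location(&self) -> Location {
        match self {
            Self::Cpu(_) => Location::Cpu,
            Self::Cuda(_) => Location::Cuda,
        }
    }

    // Same shape as Storage::matmul / where_cond above: match on the pair of
    // backends, forward to the matching implementation, error on a mismatch.
    fn add(&self, rhs: &Self) -> Result<Self, ToyError> {
        match (self, rhs) {
            (Self::Cpu(a), Self::Cpu(b)) => {
                Ok(Self::Cpu(a.iter().zip(b).map(|(x, y)| x + y).collect()))
            }
            (Self::Cuda(a), Self::Cuda(b)) => {
                // A real backend would launch a kernel here.
                Ok(Self::Cuda(a.iter().zip(b).map(|(x, y)| x + y).collect()))
            }
            (lhs, rhs) => Err(ToyError::DeviceMismatch {
                lhs: lhs.location(),
                rhs: rhs.location(),
                op: "add",
            }),
        }
    }
}

fn main() {
    let a = ToyStorage::Cpu(vec![1.0, 2.0]);
    let b = ToyStorage::Cpu(vec![3.0, 4.0]);
    println!("{:?}", a.add(&b)); // Ok(Cpu([4.0, 6.0]))

    let c = ToyStorage::Cuda(vec![0.0, 0.0]);
    println!("{:?}", a.add(&c)); // Err(DeviceMismatch { .. })
}
```

The real code carries a dedicated storage type per variant and forwards layouts, but the match-per-op structure is the same.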
import torch
from collections import OrderedDict

# Create a trivial tensor and store it under the "test" key.
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
o = OrderedDict()
o["test"] = a

# Write a trivial tensor to a pt file
torch.save(o, "test.pt")

############################################################################################################
# Write a trivial tensor to a pt file with a key
torch.save({"model_state_dict": o}, "test_with_key.pt")

############################################################################################################
# Create a tensor with fortran contiguous memory layout
import numpy as np

# Step 1: Create a 3D NumPy array with Fortran order using a range of numbers
# For example, creating a 2x3x4 array
array_fortran = np.asfortranarray(np.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4))

# Verify the memory order
print("Is Fortran contiguous (F order):", array_fortran.flags['F_CONTIGUOUS'])  # Should be True
print("Is C contiguous (C order):", array_fortran.flags['C_CONTIGUOUS'])  # Should be False

# Step 2: Convert the NumPy array to a PyTorch tensor
tensor_fortran = torch.from_numpy(array_fortran)

# Verify the tensor layout
print("Tensor stride:", tensor_fortran.stride())  # Stride will reflect the Fortran memory layout

# Step 3: Save the PyTorch tensor to a .pth file
torch.save({"tensor_fortran": tensor_fortran}, 'fortran_tensor_3d.pth')

print("3D Tensor saved with Fortran layout.")
candle/candle-core/tests/pth.py/0
{ "file_path": "candle/candle-core/tests/pth.py", "repo_id": "candle", "token_count": 441 }
15
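The pth.py script above saves a (2, 3, 4) tensor backed by Fortran-ordered memory and prints its strides, which is exactly what a loader has to reproduce. As a quick sanity check, here is a small stride calculation in plain Rust (no candle types involved) contrasting C-contiguous and Fortran-contiguous strides for that shape.

```rust
// Compute element strides for a given shape in C (row-major) and
// Fortran (column-major) order. For shape (2, 3, 4) this gives
// C: [12, 4, 1] and Fortran: [1, 2, 6], matching what
// torch.from_numpy(np.asfortranarray(...)).stride() reports.

fn c_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![1; shape.len()];
    for i in (0..shape.len().saturating_sub(1)).rev() {
        strides[i] = strides[i + 1] * shape[i + 1];
    }
    strides
}

fn f_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![1; shape.len()];
    for i in 1..shape.len() {
        strides[i] = strides[i - 1] * shape[i - 1];
    }
    strides
}

fn main() {
    let shape = [2, 3, 4];
    println!("C strides:       {:?}", c_strides(&shape)); // [12, 4, 1]
    println!("Fortran strides: {:?}", f_strides(&shape)); // [1, 2, 6]
}
```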
//! The CIFAR-10 dataset. //! //! The files can be downloaded from the following page: //! <https://www.cs.toronto.edu/~kriz/cifar.html> //! The binary version of the dataset is used. use crate::vision::Dataset; use candle::{DType, Device, Error, Result, Tensor}; use hf_hub::{api::sync::Api, Repo, RepoType}; use parquet::file::reader::{FileReader, SerializedFileReader}; use std::fs::File; use std::io::{BufReader, Read}; const W: usize = 32; const H: usize = 32; const C: usize = 3; const BYTES_PER_IMAGE: usize = W * H * C + 1; const SAMPLES_PER_FILE: usize = 10000; fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> { let mut buf_reader = BufReader::new(File::open(filename)?); let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE]; buf_reader.read_exact(&mut data)?; let mut images = vec![]; let mut labels = vec![]; for index in 0..SAMPLES_PER_FILE { let content_offset = BYTES_PER_IMAGE * index; labels.push(data[content_offset]); images.push(&data[1 + content_offset..content_offset + BYTES_PER_IMAGE]); } let images: Vec<u8> = images .iter() .copied() .flatten() .copied() .collect::<Vec<_>>(); let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?; let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?; let images = (images.to_dtype(DType::F32)? / 255.)?; Ok((images, labels)) } pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> { let dir = dir.as_ref(); let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?; let train_images_and_labels = [ "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin", ] .iter() .map(|x| read_file(&dir.join(x))) .collect::<Result<Vec<_>>>()?; let (train_images, train_labels): (Vec<_>, Vec<_>) = train_images_and_labels.into_iter().unzip(); Ok(Dataset { train_images: Tensor::cat(&train_images, 0)?, train_labels: Tensor::cat(&train_labels, 0)?, test_images, test_labels, labels: 10, }) } fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> { let samples = parquet.metadata().file_metadata().num_rows() as usize; let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024); let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples); for row in parquet.into_iter().flatten() { for (_name, field) in row.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); buffer_images.extend(image.to_rgb8().as_raw()); } } } else if let parquet::record::Field::Long(label) = field { buffer_labels.push(*label as u8); } } } let images = (Tensor::from_vec(buffer_images, (samples, 3, 32, 32), &Device::Cpu)? .to_dtype(DType::U8)? 
/ 255.)?; let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?; Ok((images, labels)) } pub fn load() -> Result<Dataset> { let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?; let dataset_id = "cifar10".to_string(); let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let test_parquet_filename = repo .get("plain_text/test/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let train_parquet_filename = repo .get("plain_text/train/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let (test_images, test_labels) = load_parquet(test_parquet)?; let (train_images, train_labels) = load_parquet(train_parquet)?; Ok(crate::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }) }
candle/candle-datasets/src/vision/cifar.rs/0
{ "file_path": "candle/candle-datasets/src/vision/cifar.rs", "repo_id": "candle", "token_count": 2139 }
16
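cifar.rs parses the binary CIFAR-10 files as fixed-size records: one label byte followed by 32*32*3 channel-major pixel bytes, 10000 records per file. A small sketch of that record layout (synthetic bytes, no file I/O or candle tensors) makes the offsets used by `read_file` explicit.

```rust
// The CIFAR-10 binary layout parsed by read_file(): each record is
// 1 label byte + 3 * 32 * 32 image bytes, stored back to back.
const W: usize = 32;
const H: usize = 32;
const C: usize = 3;
const BYTES_PER_IMAGE: usize = W * H * C + 1; // 3073

/// Split one record into (label, pixels), mirroring the slicing done in
/// read_file(): data[offset] is the label, the next 3072 bytes are the image.
fn split_record(data: &[u8], index: usize) -> (u8, &[u8]) {
    let offset = BYTES_PER_IMAGE * index;
    let label = data[offset];
    let pixels = &data[offset + 1..offset + BYTES_PER_IMAGE];
    (label, pixels)
}

fn main() {
    // Two fake records: label 3 with all-1 pixels, label 7 with all-2 pixels.
    let mut data = vec![0u8; 2 * BYTES_PER_IMAGE];
    data[0] = 3;
    data[1..BYTES_PER_IMAGE].fill(1);
    data[BYTES_PER_IMAGE] = 7;
    data[BYTES_PER_IMAGE + 1..].fill(2);

    let (label0, pixels0) = split_record(&data, 0);
    let (label1, pixels1) = split_record(&data, 1);
    assert_eq!((label0, pixels0.len(), pixels0[0]), (3, 3072, 1));
    assert_eq!((label1, pixels1.len(), pixels1[0]), (7, 3072, 2));
    println!("record layout checks passed");
}
```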
pub const LAYERNORM_KERNELS: &str = include_str!(concat!(env!("OUT_DIR"), "/layernorm_kernels.ptx"));
candle/candle-examples/examples/custom-ops/cuda_kernels.rs/0
{ "file_path": "candle/candle-examples/examples/custom-ops/cuda_kernels.rs", "repo_id": "candle", "token_count": 44 }
17
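The cuda_kernels.rs one-liner above embeds a PTX file that the crate's build script has written into Cargo's `OUT_DIR`. Here is a generic sketch of that build.rs + `include_str!` pattern; the placeholder file contents and the `src/layernorm.cu` path are assumptions for illustration, since candle's actual build step compiles CUDA kernels to PTX rather than writing a string.

```rust
// build.rs -- write a generated artifact into OUT_DIR at compile time.
// In candle's custom-ops example the artifact is a .ptx file produced from
// a .cu source; here we just write a placeholder string.
use std::{env, fs, path::Path};

fn main() {
    let out_dir = env::var("OUT_DIR").expect("cargo always sets OUT_DIR");
    let dest = Path::new(&out_dir).join("layernorm_kernels.ptx");
    fs::write(&dest, "// generated PTX would go here\n").unwrap();
    // Re-run the build script if the (hypothetical) kernel source changes.
    println!("cargo:rerun-if-changed=src/layernorm.cu");
}
```

The crate can then embed the generated file exactly as the example does, via `include_str!(concat!(env!("OUT_DIR"), "/layernorm_kernels.ptx"))`.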
// TODO: Add an offline mode. #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; use candle_transformers::models::falcon::{Config, Falcon}; struct TextGeneration { model: Falcon, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } struct GenerationOptions { temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { fn new( model: Falcon, tokenizer: Tokenizer, generation_options: GenerationOptions, seed: u64, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, generation_options.temp, generation_options.top_p); let repeat_penalty = generation_options.repeat_penalty; let repeat_last_n = generation_options.repeat_last_n; Self { model, tokenizer, logits_processor, device: device.clone(), repeat_penalty, repeat_last_n, } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { println!("starting the inference loop"); let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let start_gen = std::time::Instant::now(); let context_size = if self.model.config().use_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); println!("> {:?}", start_gen.elapsed()); println!( "{} token: {} '{}'", index + 1, next_token, self.tokenizer.decode(&[next_token], true).map_err(E::msg)? ); } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({} token/s)\n----\n{}\n----", sample_len as f64 / dt.as_secs_f64(), self.tokenizer.decode(&new_tokens, true).map_err(E::msg)? ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// Use f32 computations rather than bf16. #[arg(long)] use_f32: bool, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "tiiuae/falcon-7b")] model_id: String, #[arg(long, default_value = "refs/pr/43")] revision: String, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let dtype = if args.use_f32 { DType::F32 } else { DType::BF16 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let config = Config::falcon7b(); config.validate()?; let model = Falcon::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let generation_options = GenerationOptions { temp: args.temperature, top_p: args.top_p, repeat_penalty: args.repeat_penalty, repeat_last_n: args.repeat_last_n, }; let mut pipeline = TextGeneration::new(model, tokenizer, generation_options, args.seed, &device); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/falcon/main.rs/0
{ "file_path": "candle/candle-examples/examples/falcon/main.rs", "repo_id": "candle", "token_count": 2723 }
18
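The falcon generation loop above optionally applies a repeat penalty to the logits of recently generated tokens before sampling. The sketch below shows one common formulation of that penalty on a plain `Vec<f32>` (divide positive logits by the penalty, multiply negative ones); it is an illustration of the idea, not candle_transformers' actual `apply_repeat_penalty` implementation.

```rust
use std::collections::HashSet;

// Toy repeat-penalty: every distinct token id seen in `context` has its logit
// shrunk so it becomes less likely to be sampled again (positive logits are
// divided by the penalty, negative ones multiplied by it).
fn repeat_penalty(logits: &mut [f32], penalty: f32, context: &[u32]) {
    let mut seen = HashSet::new();
    for &token in context {
        if !seen.insert(token) {
            continue; // penalize each distinct token only once
        }
        if let Some(logit) = logits.get_mut(token as usize) {
            if *logit >= 0.0 {
                *logit /= penalty;
            } else {
                *logit *= penalty;
            }
        }
    }
}

fn main() {
    // Vocabulary of 5 tokens; tokens 1 and 3 were generated recently.
    let mut logits = vec![1.0, 2.0, 0.5, -1.0, 3.0];
    repeat_penalty(&mut logits, 1.5, &[1, 3, 3]);
    // token 1 -> 2.0 / 1.5 ≈ 1.33, token 3 -> -1.0 * 1.5 = -1.5
    println!("{logits:?}");
}
```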
# candle-phi: 1.3b and 2.7b LLM with state of the art performance for <10b models. [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) and [Phi-2](https://huggingface.co/microsoft/phi-2) are language models using only 1.3 and 2.7 billion parameters but with state of the art performance compared to models with up to 10 billion parameters. The candle implementation provides both the standard version as well as a quantized variant. ## Running some examples For the v2 version. ```bash $ cargo run --example phi --release -- --model 2 \ --prompt "A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom?" A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom? Solution: The potential energy of the skier is converted into kinetic energy as it slides down the slope. The formula for potential energy is mgh, where m is mass, g is acceleration due to gravity (9.8 m/s^2), and h is height. Since there's no friction, all the potential energy is converted into kinetic energy at the bottom of the slope. The formula for kinetic energy is 1/2mv^2, where v is velocity. We can equate these two formulas: mgh = 1/2mv^2 Solving for v, we get: v = sqrt(2gh) Substituting the given values, we get: v = sqrt(2*9.8*40) = 28 m/s Therefore, the skier speed at the bottom of the slope is 28 m/s. ``` For the v1.5 version. ```bash $ cargo run --example phi --release -- --prompt "def print_prime(n): " def print_prime(n): print("Printing prime numbers") for i in range(2, n+1): if is_prime(i): print(i) def is_prime(n): if n <= 1: return False for i in range(2, int(math.sqrt(n))+1): if n % i == 0: return False return True $ cargo run --example phi --release -- \ --prompt "Explain how to find the median of an array and write the corresponding python function.\nAnswer:" \ --quantized --sample-len 200 Explain how to find the median of an array and write the corresponding python function. Answer: The median is the middle value in an array. If the array has an even number of elements, the median is the average of the two middle values. def median(arr): arr.sort() n = len(arr) if n % 2 == 0: return (arr[n//2 - 1] + arr[n//2]) / 2 else: return arr[n//2] ``` This also supports the [Puffin Phi v2 model](https://huggingface.co/teknium/Puffin-Phi-v2) for human interaction. ``` $ cargo run --example phi --release -- \ --prompt "USER: What would you do on a sunny day in Paris?\nASSISTANT:" \ --sample-len 200 --model puffin-phi-v2 --quantized USER: What would you do on a sunny day in Paris? ASSISTANT: On a sunny day in Paris, you could visit the Musée du Louvre to admire the famous painting "Mona Lisa" by Leonardo da Vinci. You might also want to stroll along the Champs-Élysées and enjoy the beautiful architecture of the buildings around you. Don't forget to stop by a café for a cup of coffee and to soak up the sun!" ```
candle/candle-examples/examples/phi/README.md/0
{ "file_path": "candle/candle-examples/examples/phi/README.md", "repo_id": "candle", "token_count": 1011 }
19
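The Phi-2 completion above ends with v = sqrt(2gh); a two-line check of that arithmetic in plain Rust (nothing model-specific) confirms the 28 m/s figure.

```rust
fn main() {
    // Energy conservation on a frictionless slope: m g h = 1/2 m v^2  =>  v = sqrt(2 g h).
    let (g, h) = (9.8_f64, 40.0_f64);
    let v = (2.0 * g * h).sqrt();
    println!("v = {v:.1} m/s"); // v = 28.0 m/s
}
```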
# candle-replit-code: code completion specialized model.

[replit-code-v1_5-3b](https://huggingface.co/replit/replit-code-v1_5-3b) is a
language model specialized for code completion.

This model uses 3.3B parameters in `bfloat16` (so the GPU version will only work on
recent NVIDIA cards).

## Running an example

```bash
cargo run --example replit-code --release -- --prompt 'def fibonacci(n): '
```
This produces the following output.

```
def fibonacci(n): # write Fibonacci series up to n
    """Print a Fibonacci series up to n."""
    a, b = 0, 1
    while a < n:
        print(a, end=' ')
        a, b = b, a+b
    print()

def fibonacci_loop(n): # write Fibonacci series up to n
    """Print a Fibonacci series up to n."""
    result = []
    a, b = 0, 1
    while a < n:
        result.append(a)
        a, b = b, a+b
    return result

def fibonacci_generator(n): # write Fibonacci series up to n
    """Print a Fibonacci series up to n."""
    a, b = 0, 1
    while a < n:
        yield a
        a, b = b, a+b
```
candle/candle-examples/examples/replit-code/README.md/0
{ "file_path": "candle/candle-examples/examples/replit-code/README.md", "repo_id": "candle", "token_count": 426 }
20
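The completion above prints the Fibonacci series up to n. For comparison only, here is the same loop in Rust; it has nothing to do with running the example and simply mirrors what the generated Python does.

```rust
// Print the Fibonacci series up to (but not including) n, like the
// generated `fibonacci` function in the completion above.
fn fibonacci(n: u64) {
    let (mut a, mut b) = (0u64, 1u64);
    while a < n {
        print!("{a} ");
        (a, b) = (b, a + b);
    }
    println!();
}

fn main() {
    fibonacci(100); // 0 1 1 2 3 5 8 13 21 34 55 89
}
```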
//! SAM: Segment Anything Model //! https://github.com/facebookresearch/segment-anything #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::DType; use candle_nn::VarBuilder; use candle_transformers::models::segment_anything::sam; use clap::Parser; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] generate_masks: bool, /// List of x,y coordinates, between 0 and 1 (0.5 is at the middle of the image). These points /// should be part of the generated mask. #[arg(long)] point: Vec<String>, /// List of x,y coordinates, between 0 and 1 (0.5 is at the middle of the image). These points /// should not be part of the generated mask and should be part of the background instead. #[arg(long)] neg_point: Vec<String>, /// The detection threshold for the mask, 0 is the default value, negative values mean a larger /// mask, positive makes the mask more selective. #[arg(long, default_value_t = 0.)] threshold: f32, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Use the TinyViT based models from MobileSAM #[arg(long)] use_tiny: bool, } pub fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(args.cpu)?; let (image, initial_h, initial_w) = candle_examples::load_image(&args.image, Some(sam::IMAGE_SIZE))?; let image = image.to_device(&device)?; println!("loaded image {image:?}"); let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-sam".to_string()); let filename = if args.use_tiny { "mobile_sam-tiny-vitt.safetensors" } else { "sam_vit_b_01ec64.safetensors" }; api.get(filename)? } }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let sam = if args.use_tiny { sam::Sam::new_tiny(vb)? // tiny vit_t } else { sam::Sam::new(768, 12, 12, &[2, 5, 8, 11], vb)? // sam_vit_b }; if args.generate_masks { // Default options similar to the Python version. let bboxes = sam.generate_masks( &image, /* points_per_side */ 32, /* crop_n_layer */ 0, /* crop_overlap_ratio */ 512. / 1500., /* crop_n_points_downscale_factor */ 1, )?; for (idx, bbox) in bboxes.iter().enumerate() { println!("{idx} {bbox:?}"); let mask = (&bbox.data.to_dtype(DType::U8)? 
* 255.)?; let (h, w) = mask.dims2()?; let mask = mask.broadcast_as((3, h, w))?; candle_examples::save_image_resize( &mask, format!("sam_mask{idx}.png"), initial_h, initial_w, )?; } } else { let iter_points = args.point.iter().map(|p| (p, true)); let iter_neg_points = args.neg_point.iter().map(|p| (p, false)); let points = iter_points .chain(iter_neg_points) .map(|(point, b)| { use std::str::FromStr; let xy = point.split(',').collect::<Vec<_>>(); if xy.len() != 2 { anyhow::bail!("expected format for points is 0.4,0.2") } Ok((f64::from_str(xy[0])?, f64::from_str(xy[1])?, b)) }) .collect::<anyhow::Result<Vec<_>>>()?; let start_time = std::time::Instant::now(); let (mask, iou_predictions) = sam.forward(&image, &points, false)?; println!( "mask generated in {:.2}s", start_time.elapsed().as_secs_f32() ); println!("mask:\n{mask}"); println!("iou_predictions: {iou_predictions}"); let mask = (mask.ge(args.threshold)? * 255.)?; let (_one, h, w) = mask.dims3()?; let mask = mask.expand((3, h, w))?; let mut img = image::io::Reader::open(&args.image)? .decode() .map_err(candle::Error::wrap)?; let mask_pixels = mask.permute((1, 2, 0))?.flatten_all()?.to_vec1::<u8>()?; let mask_img: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(w as u32, h as u32, mask_pixels) { Some(image) => image, None => anyhow::bail!("error saving merged image"), }; let mask_img = image::DynamicImage::from(mask_img).resize_to_fill( img.width(), img.height(), image::imageops::FilterType::CatmullRom, ); for x in 0..img.width() { for y in 0..img.height() { let mask_p = imageproc::drawing::Canvas::get_pixel(&mask_img, x, y); if mask_p.0[0] > 100 { let mut img_p = imageproc::drawing::Canvas::get_pixel(&img, x, y); img_p.0[2] = 255 - (255 - img_p.0[2]) / 2; img_p.0[1] /= 2; img_p.0[0] /= 2; imageproc::drawing::Canvas::draw_pixel(&mut img, x, y, img_p) } } } for (x, y, b) in points { let x = (x * img.width() as f64) as i32; let y = (y * img.height() as f64) as i32; let color = if b { image::Rgba([255, 0, 0, 200]) } else { image::Rgba([0, 255, 0, 200]) }; imageproc::drawing::draw_filled_circle_mut(&mut img, (x, y), 3, color); } img.save("sam_merged.jpg")? } Ok(()) }
candle/candle-examples/examples/segment-anything/main.rs/0
{ "file_path": "candle/candle-examples/examples/segment-anything/main.rs", "repo_id": "candle", "token_count": 3129 }
21
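The segment-anything example parses `--point`/`--neg-point` arguments of the form "0.4,0.2" into (x, y, is_foreground) triples and bails out on malformed input. Below is a standalone version of that parsing step with a couple of checks; it follows the same approach as the closure in `main()` but is a sketch, not the example's exact code.

```rust
// Parse "x,y" point strings in [0, 1] into (x, y, is_foreground) triples:
// split on ',', expect exactly two fields, then parse both as f64.
use std::str::FromStr;

fn parse_point(s: &str, is_foreground: bool) -> Result<(f64, f64, bool), String> {
    let xy: Vec<&str> = s.split(',').collect();
    if xy.len() != 2 {
        return Err(format!("expected format for points is 0.4,0.2, got {s:?}"));
    }
    let x = f64::from_str(xy[0]).map_err(|e| e.to_string())?;
    let y = f64::from_str(xy[1]).map_err(|e| e.to_string())?;
    Ok((x, y, is_foreground))
}

fn main() {
    assert_eq!(parse_point("0.4,0.2", true), Ok((0.4, 0.2, true)));
    assert!(parse_point("0.4", true).is_err()); // missing y coordinate
    assert!(parse_point("a,b", false).is_err()); // not numbers
    println!("point parsing checks passed");
}
```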
# candle-vit

Vision Transformer (ViT) model implementation along the lines of
[vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224).
It uses a classification head trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example vit --release -- --image tiger.jpg

loaded image Tensor[dims 3, 224, 224; f32]
model built
tiger, Panthera tigris : 100.00%
tiger cat : 0.00%
jaguar, panther, Panthera onca, Felis onca: 0.00%
leopard, Panthera pardus: 0.00%
lion, king of beasts, Panthera leo: 0.00%
```
candle/candle-examples/examples/vit/README.md/0
{ "file_path": "candle/candle-examples/examples/vit/README.md", "repo_id": "candle", "token_count": 219 }
22
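The ViT example prints probabilities for the top-5 ImageNet classes. The sketch below shows the generic softmax + top-k post-processing such a classifier needs, on a plain `Vec<f32>`; it is not the candle example's actual code, which works on tensors.

```rust
// Softmax over raw logits followed by a top-k selection, which is the kind of
// post-processing needed to print "class: probability" lines like the ViT
// output above.
fn softmax(logits: &[f32]) -> Vec<f32> {
    let max = logits.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = logits.iter().map(|&x| (x - max).exp()).collect();
    let sum: f32 = exps.iter().sum();
    exps.iter().map(|&e| e / sum).collect()
}

fn top_k(probs: &[f32], k: usize) -> Vec<(usize, f32)> {
    let mut indexed: Vec<(usize, f32)> = probs.iter().copied().enumerate().collect();
    indexed.sort_by(|a, b| b.1.total_cmp(&a.1)); // descending by probability
    indexed.truncate(k);
    indexed
}

fn main() {
    // Pretend these are logits for a 6-class problem.
    let logits = [2.0_f32, 8.5, 0.1, -1.0, 3.0, 7.9];
    let probs = softmax(&logits);
    for (class, p) in top_k(&probs, 5) {
        println!("class {class}: {:.2}%", p * 100.0);
    }
}
```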