import warnings
import copy
from typing import Optional, Tuple, Union

import torch
from torch.nn import CrossEntropyLoss

from transformers.modeling_outputs import (
    BaseModelOutput,
    Seq2SeqLMOutput,
    BaseModelOutputWithPastAndCrossAttentions,
)
from transformers.models.t5.modeling_t5 import (
    T5Stack,
    T5ForConditionalGeneration,
    # Alias the warning message: referencing a dunder-prefixed module-level name
    # inside a class body would be subject to Python's private-name mangling.
    __HEAD_MASK_WARNING_MSG as HEAD_MASK_WARNING_MSG,
)
from transformers import T5Config
from transformers.cache_utils import Cache, DynamicCache, EncoderDecoderCache
from transformers.utils import is_torchdynamo_compiling
from transformers.utils import logging  # transformers' logger provides `warning_once`, used below

logger = logging.get_logger(__name__)


class LingConvT5Stack(T5Stack):
    def __init__(self, config: T5Config, embed_tokens=None):
        super().__init__(config, embed_tokens)
        # Add new attributes for ling injection
        self.ling_injection_layer = getattr(config, 'ling_injection_layer', -1)
        self.ling_injection_type = getattr(config, 'ling_injection_type', 'none')  # 'none', 'first', 'all'
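        # Note: with the defaults above, injection is effectively disabled:
        # a `ling_injection_layer` of -1 never equals a block index, and a
        # `ling_injection_type` of 'none' is checked for explicitly in forward().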

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        cache_position=None,
        ling_embed=None,
    ):
        # Model parallel
        if self.model_parallel:
            torch.cuda.set_device(self.first_device)
            self.embed_tokens = self.embed_tokens.to(self.first_device)
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            if self.embed_tokens is None:
                raise ValueError("You have to initialize the model with valid token embeddings")
            inputs_embeds = self.embed_tokens(input_ids)

        batch_size, seq_length = input_shape

        if use_cache is True:
            if not self.is_decoder:
                raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")

        # initialize past_key_values
        return_legacy_cache = False
        return_self_attention_cache = False
        if self.is_decoder and (use_cache or past_key_values is not None):
            if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
                return_self_attention_cache = True
                past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
            elif not isinstance(past_key_values, EncoderDecoderCache):
                return_legacy_cache = True
                logger.warning_once(
                    "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. "
                    "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                    "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
                )
                past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
            elif past_key_values is None:
                past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
        elif not self.is_decoder:
            # do not pass cache object down the line for encoder stack
            # it messes indexing later in decoder-stack because cache object is modified in-place
            past_key_values = None

        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        if self.config.is_decoder:
            causal_mask = self._update_causal_mask(
                attention_mask,
                inputs_embeds,
                cache_position,
                past_key_values.self_attention_cache if past_key_values is not None else None,
                output_attentions,
            )
        elif attention_mask is not None:
            causal_mask = attention_mask[:, None, None, :]
            causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
            causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
        else:
            causal_mask = None

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(
                    encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long
                )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and self.is_decoder) else None
        position_bias = None
        encoder_decoder_position_bias = None

        hidden_states = self.dropout(inputs_embeds)

        for i, layer_module in enumerate(self.block):
            layer_head_mask = head_mask[i]
            cross_attn_layer_head_mask = cross_attn_head_mask[i]
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure that attention_mask is always on the same device as hidden_states
                if causal_mask is not None:
                    causal_mask = causal_mask.to(hidden_states.device)
                if position_bias is not None:
                    position_bias = position_bias.to(hidden_states.device)
                if encoder_hidden_states is not None:
                    encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
                if encoder_extended_attention_mask is not None:
                    encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
                if encoder_decoder_position_bias is not None:
                    encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
                if layer_head_mask is not None:
                    layer_head_mask = layer_head_mask.to(hidden_states.device)
                if cross_attn_layer_head_mask is not None:
                    cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.forward,
                    hidden_states,
                    causal_mask,
                    position_bias,
                    encoder_hidden_states,
                    encoder_extended_attention_mask,
                    encoder_decoder_position_bias,
                    layer_head_mask,
                    cross_attn_layer_head_mask,
                    None,  # past_key_value is always None with gradient checkpointing
                    use_cache,
                    output_attentions,
                    return_dict,
                    cache_position,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_bias=position_bias,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_extended_attention_mask,
                    encoder_decoder_position_bias=encoder_decoder_position_bias,
                    layer_head_mask=layer_head_mask,
                    cross_attn_layer_head_mask=cross_attn_layer_head_mask,
                    past_key_value=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    return_dict=return_dict,
                    cache_position=cache_position,
                )

            # layer_outputs is a tuple with:
            # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
            if use_cache is False:
                layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]

            hidden_states, next_decoder_cache = layer_outputs[:2]

            # Add linguistic embedding injection after specified layer
            if (
                self.is_decoder
                and self.ling_injection_layer == i
                and ling_embed is not None
                and self.ling_injection_type != 'none'
            ):
                hidden_states = hidden_states + ling_embed
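                # `ling_embed` is expected to broadcast against `hidden_states`
                # of shape (batch_size, seq_len, d_model); e.g. a per-example
                # vector of shape (batch_size, 1, d_model) is added at every
                # decoder position.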

            # We share the position biases between the layers - the first layer stores them
            # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
            # (cross-attention position bias), (cross-attention weights)
            position_bias = layer_outputs[2]
            if self.is_decoder and encoder_hidden_states is not None:
                encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[3],)
                if self.is_decoder:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[5],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_self_attention_cache:
            next_cache = past_key_values.self_attention_cache
        if return_legacy_cache:
            next_cache = past_key_values.to_legacy_cache()

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_cache,
                    all_hidden_states,
                    all_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            cross_attentions=all_cross_attentions,
        )


class LingConvT5ForConditionalGeneration(T5ForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        # Replace default decoder with our custom decoder
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = LingConvT5Stack(decoder_config, embed_tokens=self.shared)
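        # The replacement stack adds no parameters of its own, so pretrained T5
        # decoder weights still load by name, and it reuses the shared embedding
        # table exactly like the stock decoder it replaces.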

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        ling_embed: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r""" | |
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., | |
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for | |
labels in `[0, ..., config.vocab_size]` | |

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, T5ForConditionalGeneration

        >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
        >>> model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small")

        >>> # training
        >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
        >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
        >>> outputs = model(input_ids=input_ids, labels=labels)
        >>> loss = outputs.loss
        >>> logits = outputs.logits

        >>> # inference
        >>> input_ids = tokenizer(
        ...     "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> outputs = model.generate(input_ids)
        >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
        >>> # studies have shown that owning a dog is good for you.
        ```"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask

        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # Convert encoder inputs in embeddings if needed
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)

        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
            hidden_states = hidden_states.to(self.decoder.first_device)
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
            if attention_mask is not None:
                attention_mask = attention_mask.to(self.decoder.first_device)
            if decoder_attention_mask is not None:
                decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            ling_embed=ling_embed,
        )

        sequence_output = decoder_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.encoder.first_device)
            self.lm_head = self.lm_head.to(self.encoder.first_device)
            sequence_output = sequence_output.to(self.lm_head.weight.device)

        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.model_dim**-0.5)

        lm_logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            # move labels to correct device to enable PP
            labels = labels.to(lm_logits.device)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
            # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666

        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
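

# Minimal usage sketch (illustrative only). The checkpoint name and tokenizer
# calls follow the standard transformers API; the `ling_injection_*` attributes
# are the ones read by LingConvT5Stack above, and the shape chosen here for
# `ling_embed` is an assumption: it just needs to broadcast against the decoder
# hidden states of shape (batch_size, seq_len, d_model).
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = T5Config.from_pretrained("google-t5/t5-small")
    config.ling_injection_layer = 0       # inject after the first decoder block
    config.ling_injection_type = "first"  # any value other than 'none' enables injection

    tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
    model = LingConvT5ForConditionalGeneration.from_pretrained("google-t5/t5-small", config=config)

    enc = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
    labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids

    # Hypothetical linguistic embedding: one vector per example, broadcast over positions.
    ling_embed = torch.randn(enc.input_ids.size(0), 1, config.d_model)

    outputs = model(
        input_ids=enc.input_ids,
        attention_mask=enc.attention_mask,
        labels=labels,
        ling_embed=ling_embed,
    )
    print(float(outputs.loss))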