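"""PyTorch ModularStarEncoder.

A bidirectional encoder built on top of Starcoder2, pre-trained with a masked
language modeling head and an in-context (sequence relationship) head,
optionally with a layer-wise matryoshka loss over several intermediate layers.
"""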
import sys
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers import Starcoder2Model
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    ModelOutput,
    logging,
)

from config import ModularStarEncoderConfig

logger = logging.get_logger(__name__)


class StarEncoder2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ModularStarEncoderConfig
base_model_prefix = "ModularStarEncoder"
model_type = "ModularStarEncoder"
supports_gradient_checkpointing = True
_supports_flash_attn_2 = True
_supports_sdpa = True
    _supports_cache_class = True

    def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class StarEncoder2Pooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the last token.
last_token_tensor = hidden_states[:, -1]
pooled_output = self.dense(last_token_tensor)
pooled_output = self.activation(pooled_output)
        return pooled_output


@dataclass
class ModularStarEncoderOutput(ModelOutput):
"""
    Output type of [`ModularStarEncoder`].
Args:
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the in-context classification head (scores of True/False
            continuation before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: Optional[torch.FloatTensor] = None
    seq_relationship_logits: Optional[torch.FloatTensor] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class StarEncoder2PredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.is_matryoshka = config.layer_matryoshka_loss
if self.is_matryoshka:
            self.dense = nn.Linear(
                config.hidden_size + config.conditional_size, config.hidden_size + config.conditional_size
            )
            self.LayerNorm = nn.LayerNorm(config.hidden_size + config.conditional_size, eps=config.layer_norm_eps)
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
            self.transform_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class StarEncoder2LMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
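        # NOTE: some attributes of the shared config may arrive as per-layer
        # tuples/lists; this head needs scalar values, so each such attribute
        # is collapsed to its first element before the layers below are built.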
        for element in dir(config):
            value = getattr(config, element)
            if isinstance(value, (tuple, list)) and len(value) > 0:
                setattr(config, element, value[0])
self.transform = StarEncoder2PredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.is_matryoshka = config.layer_matryoshka_loss
if self.is_matryoshka:
self.decoder = nn.Linear(config.hidden_size + config.conditional_size, config.vocab_size, bias=False)
else:
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
        return hidden_states


class StarEncoder2PreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = StarEncoder2LMPredictionHead(config)
self.is_matryoshka = config.layer_matryoshka_loss
        if self.is_matryoshka:
            self.seq_relationship = nn.Linear(config.hidden_size + config.conditional_size, 2)
            self.conditional_embeddings = nn.Embedding(len(config.matryoshka_layers), config.conditional_size)
        else:
            self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output, idx_layer: Optional[torch.Tensor] = None):
        if self.is_matryoshka:
            # Tensor.get_device() returns -1 for CPU tensors.
            device_sequence = sequence_output.get_device()
            if device_sequence < 0:
                device_sequence = "cpu"
            # Condition both heads on the matryoshka layer index by concatenating
            # a learned layer embedding to every hidden state.
            layer_embedding = self.conditional_embeddings(torch.tensor(idx_layer, device=device_sequence).int())
            prediction_scores = self.predictions(
                torch.cat([sequence_output, layer_embedding.expand(sequence_output.size(0), sequence_output.size(1), -1)], dim=-1)
            )
            seq_relationship_score = self.seq_relationship(
                torch.cat([pooled_output, layer_embedding.expand(pooled_output.size(0), -1)], dim=-1)
            )
        else:
            prediction_scores = self.predictions(sequence_output)
            seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class ModularStarEncoder(StarEncoder2PreTrainedModel):
    _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
    config_class = ModularStarEncoderConfig

    def __init__(self, config):
        super().__init__(config)
        self.model_type = "ModularStarEncoder"
self.cls = StarEncoder2PreTrainingHeads(config)
self.layer_matryoshka_loss = config.layer_matryoshka_loss
self.matryoshka_layers = config.matryoshka_layers
        if self.layer_matryoshka_loss:
            # Sliding-window attention is not supported together with the matryoshka loss.
            config.sliding_window = None
            logger.warning_once(
                "The matryoshka loss is implemented without a sliding window; "
                "to use sliding-window attention, disable the matryoshka loss."
            )
            if self.matryoshka_layers[-1] != config.num_hidden_layers:
                logger.warning_once(
                    f"The last entry of matryoshka_layers (currently {self.matryoshka_layers[-1]}) must match "
                    f"the total number of hidden layers (currently {config.num_hidden_layers}); aborting."
                )
                sys.exit()
        self.starEncoder2 = Starcoder2Model(config)
        self.pooler = StarEncoder2Pooler(config)
        # Disable causal masking so the model behaves as a bidirectional encoder.
        for layer in self.starEncoder2.layers:
            layer.self_attn.is_causal = False
        # Initialize weights and apply final processing.
        self.post_init()

    def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
next_sentence_label: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], ModularStarEncoderOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for the in-context (sequence-relationship) loss:
            - 0 indicates that sequence B belongs to the same repository as sequence A,
            - 1 indicates that sequence B comes from a random repository.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.starEncoder2(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
            output_hidden_states=True,  # every layer's hidden states are needed for the matryoshka heads
return_dict=return_dict,
)
        # If the layer-wise matryoshka loss is enabled, compute MLM and
        # in-context scores from every matryoshka layer.
        if self.layer_matryoshka_loss:
            prediction_scores = []
            seq_relationship_score = []
            for counter, idx_layer in enumerate(self.matryoshka_layers):
                # The CLS token sits in the last position, so the pooler projects
                # the hidden state of the last token of the selected layer.
                pooled_output = self.pooler(outputs.hidden_states[idx_layer])
                # All hidden states of the selected layer.
                sequence_output = outputs.hidden_states[idx_layer]
                temp_prediction_scores, temp_seq_relationship_score = self.cls(
                    sequence_output, pooled_output, counter
                )
                prediction_scores.append(temp_prediction_scores)
                seq_relationship_score.append(temp_seq_relationship_score)
        else:
            # The CLS token sits in the last position, so the pooler projects
            # the hidden state of the last token of the last layer.
            pooled_output = self.pooler(outputs.last_hidden_state)
            sequence_output = outputs.last_hidden_state
            prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        total_loss = None
        if labels is not None and next_sentence_label is not None and not self.layer_matryoshka_loss:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        elif labels is not None and next_sentence_label is not None and self.layer_matryoshka_loss:
            loss_fct = CrossEntropyLoss()
            num_layers = len(prediction_scores)
            # Sum the per-layer losses, linearly up-weighting deeper layers:
            # the i-th matryoshka layer (1-based) is scaled by i / num_layers.
            for index in range(num_layers):
                masked_lm_loss = loss_fct(prediction_scores[index].view(-1, self.config.vocab_size), labels.view(-1))
                next_sentence_loss = loss_fct(seq_relationship_score[index].view(-1, 2), next_sentence_label.view(-1))
                if total_loss is not None:
                    total_loss += (masked_lm_loss + next_sentence_loss) * ((index + 1) / num_layers)
                else:
                    total_loss = (masked_lm_loss + next_sentence_loss) * ((index + 1) / num_layers)
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        last_hidden_state = outputs.hidden_states[-1]
        return ModularStarEncoderOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=outputs.hidden_states,
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            attentions=outputs.attentions,
        )
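

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the training code).
# It assumes ModularStarEncoderConfig provides sensible defaults for all
# Starcoder2 fields and that `layer_matryoshka_loss` can be disabled via a
# keyword argument; adapt the config values to your own checkpoint or setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = ModularStarEncoderConfig(layer_matryoshka_loss=False)  # hypothetical kwargs
    model = ModularStarEncoder(config)
    model.eval()

    # Random token ids stand in for a tokenized code snippet.
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        out = model(input_ids=input_ids, attention_mask=attention_mask)
    print(out.prediction_logits.shape)        # (1, 16, vocab_size)
    print(out.seq_relationship_logits.shape)  # (1, 2)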