devve1 committed
Commit bea1da8 · verified · Parent(s): f0b4505

Upload modeling.py

Files changed (1): modeling.py (+1389, -0)
modeling.py ADDED
@@ -0,0 +1,1389 @@
+ # coding=utf-8
+ # Copyright 2024 The GTE Team Authors and Alibaba Group.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """PyTorch NEW model."""
+
+ import math
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (
+     BaseModelOutput,
+     BaseModelOutputWithPooling,
+     MaskedLMOutput,
+     MultipleChoiceModelOutput,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutput,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ try:
+     import xformers.ops as xops
+ except ImportError:
+     xops = None
+
+ from .configuration import NewConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ # Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/bert_padding.py
+ # Which was adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
+ class IndexFirstAxis(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, input, indices):
+         ctx.save_for_backward(indices)
+         assert input.ndim >= 2
+         ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
+         second_dim = other_shape.numel()
+         # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
+         # return input[indices]
+         # return torch.gather(
+         #     rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
+         # ).reshape(-1, *other_shape)
+         return torch.gather(
+             input.view(ctx.first_axis_dim, second_dim),
+             0,
+             indices.unsqueeze(-1).expand(indices.size(0), second_dim)
+         ).reshape(-1, *other_shape)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         (indices,) = ctx.saved_tensors
+         assert grad_output.ndim >= 2
+         other_shape = grad_output.shape[1:]
+         # grad_output = rearrange(grad_output, "b ... -> b (...)")
+         grad_output = grad_output.view(grad_output.size(0), other_shape.numel())
+         grad_input = torch.zeros(
+             [ctx.first_axis_dim, grad_output.shape[1]],
+             device=grad_output.device,
+             dtype=grad_output.dtype,
+         )
+         # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
+         # grad_input[indices] = grad_output
+         # grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
+         grad_input.scatter_(
+             0, indices.unsqueeze(-1).expand(indices.size(0), grad_output.size(1)), grad_output
+         )
+         return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
+
+
+ index_first_axis = IndexFirstAxis.apply
+
+
+ def unpad_input(hidden_states, attention_mask=None, indices=None):
+     """
+     Arguments:
+         hidden_states: (batch, seqlen, ...)
+         attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
+         indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
+     Return:
+         hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
+     """
+     if indices is None:
+         assert attention_mask is not None
+         indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+
+     # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
+     # bool mask, then call nonzero to get the indices, then index with those. The indices are @dim
+     # times larger than they need to be, wasting memory. It's faster and more memory-efficient to
+     # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
+     # so we write custom forward and backward to make it a bit faster.
+     hidden_states = hidden_states.view(-1, *hidden_states.shape[2:])
+     return index_first_axis(hidden_states, indices)
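+
+ # Editor's note, a minimal sketch with hypothetical values: for
+ # attention_mask = [[1, 1, 0], [1, 0, 0]], the flattened mask is
+ # [1, 1, 0, 1, 0, 0], so indices = [0, 1, 3] and unpad_input returns the
+ # 3 valid token rows out of the batch * seqlen = 6 flattened rows.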
+
+
+ class IndexPutFirstAxis(torch.autograd.Function):
+     @staticmethod
+     def forward(
+         ctx,
+         values: torch.Tensor,
+         indices: torch.Tensor,
+         first_axis_dim
+     ) -> torch.Tensor:
+         ctx.save_for_backward(indices)
+         assert indices.ndim == 1
+         assert values.ndim >= 2
+         output = torch.zeros(
+             first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
+         )
+         output[indices] = values
+         return output
+
+     @staticmethod
+     def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None, None]:
+         indices, = ctx.saved_tensors
+         grad_values = grad_output[indices]
+         return grad_values, None, None
+
+
+ index_put_first_axis = IndexPutFirstAxis.apply
+
+
+ def pad_input(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:
+     """Add padding to sequences.
+
+     Arguments:
+         inputs: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
+         indices: (total_nnz), `indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()`
+         batch: int batch_size
+         seqlen: int max sequence length
+
+     Returns:
+         inputs: (batch, seqlen, ...)
+     """
+     output = index_put_first_axis(inputs, indices, batch * seqlen)
+     return output.view(batch, seqlen, *inputs.shape[1:])
+
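+ # Editor's note (sketch): pad_input is the inverse of unpad_input up to the
+ # padded positions, which come back as zeros:
+ #     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ #     restored = pad_input(unpad_input(x, attention_mask), indices, batch, seqlen)
+ # restored equals x wherever attention_mask == 1.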
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos, sin = cos.to(q.dtype), sin.to(q.dtype)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
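+ # Editor's note: for a pair (x1, x2) at position m with frequency theta_i, the
+ # two lines above implement the 2-D rotation
+ #     (x1 * cos(m * theta_i) - x2 * sin(m * theta_i),
+ #      x2 * cos(m * theta_i) + x1 * sin(m * theta_i))
+ # in the "rotate_half" layout, where the pairs are (x[j], x[j + dim // 2]).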
+
+ class RotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, max_position_embeddings=512, base=10000.0, device=None):
+         super().__init__()
+
+         self.dim = dim
+         self.max_position_embeddings = max_position_embeddings
+         self.base = base
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         # Build here to make `torch.jit.trace` work.
+         self._set_cos_sin_cache(
+             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+         )
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+     def forward(self, x, seq_len=None):
+         # x is only used to pick the device and dtype of the returned cos/sin tables
+         if seq_len > self.max_seq_len_cached:
+             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+         return (
+             self.cos_cached[:seq_len, ...].to(dtype=x.dtype),
+             self.sin_cached[:seq_len, ...].to(dtype=x.dtype),
+         )
+
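+ # Editor's note: cos_cached / sin_cached have shape [seq_len, dim]; the caller
+ # (NewEmbeddings.forward below) indexes them with position_ids and unsqueezes a
+ # head dimension, yielding [bs, seq_len, 1, dim] tensors that broadcast over heads.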
+
+ class NTKScalingRotaryEmbedding(RotaryEmbedding):
+     """RotaryEmbedding extended with fixed and mixed NTK scaling. https://kexue.fm/archives/9706 """
+
+     def __init__(self, dim, max_position_embeddings=512, base=10000, device=None, scaling_factor=1.0, mixed_b=None):
+         self.scaling_factor = scaling_factor
+         self.mixed_b = mixed_b
+         super().__init__(dim, max_position_embeddings, base, device)
+         max_position_embeddings = max_position_embeddings * self.scaling_factor
+         self._set_cos_sin_cache(max_position_embeddings, self.inv_freq.device, torch.get_default_dtype())
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         if seq_len > self.max_position_embeddings:
+             base = self.base * (self.scaling_factor if self.mixed_b is None else 1)
+             inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+
+             if self.mixed_b is None:
+                 inv_freq = inv_freq / self.scaling_factor ** (2 / self.dim)  # (6)
+             else:
+                 a = torch.tensor(self.scaling_factor).log() / (self.dim / 2) ** self.mixed_b  # (13)
+                 lambda_1_m = (a * torch.arange(1, self.dim // 2 + 1).float().to(device) ** self.mixed_b).exp()  # (12)
+                 inv_freq = inv_freq / lambda_1_m  # (10)
+
+             self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
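+ # Editor's note, a sketch of the fixed-NTK case (mixed_b=None): the base is
+ # multiplied by the scaling factor s and each frequency is further divided by
+ # s ** (2 / dim), i.e. inv_freq_i = (s * base) ** (-2i / dim) / s ** (2 / dim).
+ # The "(6)", "(10)", "(12)", "(13)" tags above appear to refer to the equation
+ # numbers in the linked https://kexue.fm/archives/9706 post.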
+ class RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         RMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
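+ # Editor's note: RMSNorm computes y = w * x / sqrt(mean(x ** 2) + eps), i.e.
+ # LayerNorm without mean subtraction and without a bias term; the statistics
+ # are computed in float32 and the result is cast back to the input dtype.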
+
+ LAYER_NORM = {
+     'layer_norm': nn.LayerNorm,
+     'rms_norm': RMSNorm
+ }
+
+
+ class NewEmbeddings(nn.Module):
+     """
+     Embedding and Unpadding.
+     """
+
+     def __init__(self, config: NewConfig):
+         super().__init__()
+         self.padding_idx = config.pad_token_id
+         self.word_embeddings = nn.Embedding(
+             config.vocab_size, config.hidden_size, padding_idx=self.padding_idx
+         )
+
+         self.position_embedding_type = config.position_embedding_type
+         if self.position_embedding_type == 'absolute':
+             self.position_embeddings = nn.Embedding(
+                 config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+             )
+         elif self.position_embedding_type == 'rope':
+             self._init_rope(config)
+         else:
+             raise ValueError(f"Unsupported position_embedding_type: {self.position_embedding_type!r}")
+
+         self.type_vocab_size = config.type_vocab_size
+         if self.type_vocab_size > 0:
+             self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+         # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+         # any TensorFlow checkpoint file
+         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+         # position_ids is contiguous in memory and excluded when serialized
+         self.register_buffer(
+             "position_ids", torch.arange(config.max_position_embeddings), persistent=False
+         )
+
+     def _init_rope(self, config):
+         kwargs = dict(
+             dim=int(config.hidden_size / config.num_attention_heads),
+             max_position_embeddings=config.max_position_embeddings,
+             base=config.rope_theta
+         )
+         if config.rope_scaling is None:
+             self.rotary_emb = RotaryEmbedding(**kwargs)
+         else:
+             kwargs.update(scaling_factor=config.rope_scaling["factor"])
+             scaling_type = config.rope_scaling["type"]
+             if scaling_type == 'ntk':
+                 kwargs.update(mixed_b=config.rope_scaling.get('mixed_b', None))
+                 self.rotary_emb = NTKScalingRotaryEmbedding(**kwargs)
+             # elif scaling_type == "linear":
+             #     self.rotary_emb = LinearScalingRotaryEmbedding(**kwargs)
+             # elif scaling_type == "dynamic":
+             #     self.rotary_emb = DynamicNTKScalingRotaryEmbedding(**kwargs)
+             else:
+                 raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+     def forward(
+         self,
+         unpad_inputs: bool,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         length: Optional[List[int]] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+     ) -> Tuple[torch.Tensor, torch.Tensor, Optional[Tuple], Optional[List[int]]]:
+         """Embed the tokens, optionally unpad them, and build rotary position embeddings."""
+         if inputs_embeds is None:
+             device, input_shape = input_ids.device, input_ids.shape
+         else:
+             device, input_shape = inputs_embeds.device, inputs_embeds.shape[:2]
+         batch_size, seq_length = input_shape
+
+         # Set attention_mask if it's None
+         if attention_mask is None:
+             attention_mask = torch.ones(input_shape, device=device)
+             if length is not None:
+                 for i, l in enumerate(length):
+                     attention_mask[i, l:] = 0
+
+         # Set attention_mask_bool for unpadding
+         if unpad_inputs:
+             attention_mask_bool = attention_mask.bool()
+             if length is None:
+                 length = attention_mask.sum(-1).tolist()
+
+         # Get word embeddings
+         if inputs_embeds is None:
+             if unpad_inputs:
+                 input_ids = input_ids[attention_mask_bool].unsqueeze(0)
+             inputs_embeds = self.word_embeddings(input_ids)
+         else:
+             if unpad_inputs:
+                 inputs_embeds = inputs_embeds[attention_mask_bool].unsqueeze(0)
+         embeddings = inputs_embeds
+
+         # Set and unpad position_ids
+         if position_ids is None:
+             if seq_length > self.position_ids.size(0):
+                 self.register_buffer(
+                     "position_ids", torch.arange(seq_length, device=self.position_ids.device), persistent=False
+                 )
+             if unpad_inputs:
+                 # [1, cumsum_seq_len]
+                 position_ids = torch.cat([self.position_ids[:l] for l in length]).unsqueeze(0)
+             else:
+                 # [bs, seq_len]
+                 position_ids = self.position_ids[:seq_length].expand(batch_size, -1)
+         elif unpad_inputs:
+             position_ids = position_ids[attention_mask_bool].unsqueeze(0)  # [1, cumsum_seq_len]
+
+         # Compute rotary embedding
+         if self.position_embedding_type == 'rope':
+             rope_cos, rope_sin = self.rotary_emb(inputs_embeds, seq_len=seq_length)
+             rope_cos = rope_cos[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
+             rope_sin = rope_sin[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
+             rope_embeds = rope_cos, rope_sin
+         else:
+             rope_embeds = None
+
+         if self.type_vocab_size > 0:
+             if token_type_ids is None:
+                 token_type_ids = position_ids.mul(0)
+             elif unpad_inputs:
+                 token_type_ids = token_type_ids[attention_mask_bool].unsqueeze(0)
+
+             token_type_embeddings = self.token_type_embeddings(token_type_ids)
+             embeddings += token_type_embeddings
+
+         # BERT-style absolute position embeddings
+         if self.position_embedding_type == "absolute":
+             position_embeddings = self.position_embeddings(position_ids)
+             embeddings += position_embeddings
+
+         embeddings = self.LayerNorm(embeddings)
+         embeddings = self.dropout(embeddings)
+
+         return embeddings, attention_mask, rope_embeds, length
+
+
+ class NewAttention(nn.Module):
+     def __init__(self, config: NewConfig, pack_qkv=None, use_memory_efficient_attention=None):
+         super().__init__()
+         self.config = config
+         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+             raise ValueError(
+                 f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+                 f"heads ({config.num_attention_heads})"
+             )
+
+         self.hidden_size = config.hidden_size
+         self.num_attention_heads = config.num_attention_heads
+         self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+         self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+         if pack_qkv is None:
+             pack_qkv = config.pack_qkv
+         self.pack_qkv = pack_qkv
+
+         if self.pack_qkv:
+             self.qkv_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=True)
+         else:
+             self.q_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+             self.k_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+             self.v_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+
+         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+         self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
+         if use_memory_efficient_attention is None:
+             use_memory_efficient_attention = self.config.use_memory_efficient_attention
+         self.use_memory_efficient_attention = use_memory_efficient_attention
+         self.memory_efficient_attention = None if xops is None else xops.memory_efficient_attention
+         if self.use_memory_efficient_attention:
+             assert self.memory_efficient_attention is not None, 'please install xformers'
+         if self.config.unpad_inputs:
+             assert self.config.use_memory_efficient_attention, 'unpad only with xformers'
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_bias: torch.FloatTensor,
+         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+         attention_scale: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         qkv_inputs: Optional[Tuple] = None,  # For RetroMAE
+         padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
+     ) -> Tuple[torch.Tensor, ...]:
+         shape_hd = (self.num_attention_heads, self.attention_head_size)
+         # qkv
+         if self.pack_qkv and qkv_inputs is None:
+             qkv_pack = self.qkv_proj(hidden_states).split(self.all_head_size, dim=-1)
+         else:
+             if qkv_inputs is None:
+                 qkv_inputs = (hidden_states, hidden_states, hidden_states)
+             qkv_pack = [
+                 getattr(self, n + '_proj')(s) for s, n in zip(qkv_inputs, 'qkv')
+             ]
+         query_states, key_states, value_states = [t.view(t.shape[:-1] + shape_hd) for t in qkv_pack]
+
+         if self.config.position_embedding_type == 'rope':
+             query_states, key_states = apply_rotary_pos_emb(query_states, key_states, *rope_embeds)
+
+         dtype = query_states.dtype
+
+         if self.config.logn_attention_scale and attention_scale is not None:
+             # https://kexue.fm/archives/8823
+             query_states = query_states * attention_scale.to(dtype)
+
+         if padding_inputs is not None:
+             query_states = pad_input(query_states.squeeze(), *padding_inputs)
+             key_states = pad_input(key_states.squeeze(), *padding_inputs)
+             value_states = pad_input(value_states.squeeze(), *padding_inputs)
+
+         attention_probs = None
+         if self.use_memory_efficient_attention:
+             assert self.memory_efficient_attention is not None, "xformers is not loaded"
+             assert output_attentions is False, "memory_efficient_attention does not output attention weights"
+             assert head_mask is None, "head_mask is not supported yet"
+             if torch.is_tensor(attention_bias):
+                 attention_bias = attention_bias.to(dtype)
+             context_layer = self.memory_efficient_attention(
+                 query_states,
+                 key_states,
+                 value_states,
+                 attn_bias=attention_bias,
+                 p=self.dropout.p
+             )
+         else:
+             context_layer = self._attention(query_states, key_states, value_states, attention_bias, head_mask)
+
+         if padding_inputs is not None:
+             context_layer = unpad_input(context_layer, indices=padding_inputs[0])
+
+         new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+         context_layer = context_layer.view(new_context_layer_shape)
+
+         # output proj
+         attn_output = self.o_proj(context_layer)
+
+         # add attentions if we output them
+         outputs = (attn_output, attention_probs) if output_attentions else (attn_output,)
+         return outputs
+
+     def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
+         """
+         Args:
+             q/k/v: (B, L, n_head, head_dim)
+         Returns:
+             attn_output: (B, L, n_head, head_dim)
+         """
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+         # Take the dot product between "query" and "key" to get the raw attention scores.
+         attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
+
+         attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+         if attention_bias is not None:
+             # Apply the attention mask (precomputed for all layers in the model's forward() function)
+             attention_scores = attention_scores + attention_bias
+
+         # Normalize the attention scores to probabilities.
+         attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+         # This is actually dropping out entire tokens to attend to, which might
+         # seem a bit unusual, but is taken from the original Transformer paper.
+         attention_probs = self.dropout(attention_probs)
+
+         # Mask heads if we want to
+         if head_mask is not None:
+             attention_probs = attention_probs * head_mask
+
+         context_layer = torch.matmul(attention_probs, value_states)
+
+         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+         return context_layer
+
+
+
+ class NewSdpaAttention(NewAttention):
+     """
+     New attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+     `NewAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
+     the SDPA API.
+     """
+     def __init__(self, config: NewConfig, **kwargs):
+         super().__init__(config, **kwargs)
+         torch.backends.cuda.enable_mem_efficient_sdp(False)
+         logger.warning(
+             "Disabled the memory-efficient attention kernel for `NewSdpaAttention`; set "
+             "`use_memory_efficient_attention=True` if you want to use it."
+         )
+
+     def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
+         attn_output = torch.nn.functional.scaled_dot_product_attention(
+             query_states.transpose(1, 2),
+             key_states.transpose(1, 2),
+             value_states.transpose(1, 2),
+             attn_mask=attention_bias,
+             dropout_p=self.dropout.p if self.training else 0.0,
+         )
+         attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
+         return attn_output
+
+
+ NEW_ATTENTION_CLASSES = {
+     "eager": NewAttention,
+     # "flash_attention_2": ,  # TODO: xformers will dispatch to flash_attn
+     "sdpa": NewSdpaAttention,
+ }
+
+
+ class NewGatedMLP(nn.Module):
+     """
+     GLU Variants Improve Transformer.
+     """
+
+     def __init__(self, config: NewConfig):
+         super().__init__()
+         self.intermediate_size = config.intermediate_size
+         self.up_gate_proj = nn.Linear(config.hidden_size, self.intermediate_size * 2, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=True)
+         self.act_fn = ACT2FN[config.hidden_act]
+         if config.hidden_dropout_prob > 0:
+             self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
+         else:
+             self.hidden_dropout = None
+
+     def forward(self, hidden_states):
+         up_gate = self.up_gate_proj(hidden_states)
+         up_states, gate = torch.split(up_gate, self.intermediate_size, dim=-1)
+         gate = self.act_fn(gate)
+         gated_states = gate * up_states
+         if self.hidden_dropout is not None:
+             gated_states = self.hidden_dropout(gated_states)
+         down_states = self.down_proj(gated_states)
+         return down_states
+
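+ # Editor's note (sketch): this is the gated-linear-unit FFN from "GLU Variants
+ # Improve Transformer", down_proj(act(gate) * up), e.g. SwiGLU when
+ # config.hidden_act is SiLU; up and gate come from one fused linear projection
+ # of width 2 * intermediate_size.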
+
+ class NewLayer(nn.Module):
+     def __init__(
+         self,
+         config: NewConfig,
+         pack_qkv=None,
+         use_memory_efficient_attention=None,
+         attn_implementation=None
+     ):
+         super().__init__()
+         if attn_implementation is None:
+             attn_implementation = config._attn_implementation
+         if attn_implementation != 'eager':
+             use_memory_efficient_attention = False
+         self.attention = NEW_ATTENTION_CLASSES[attn_implementation](
+             config, pack_qkv=pack_qkv, use_memory_efficient_attention=use_memory_efficient_attention
+         )
+         self.mlp = NewGatedMLP(config)
+
+         ln_class = LAYER_NORM[config.layer_norm_type]
+         self.attn_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)
+         self.mlp_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)
+
+         if config.hidden_dropout_prob > 0:
+             self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
+         else:
+             self.hidden_dropout = None
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_bias: torch.FloatTensor,
+         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+         attention_scale: Optional[torch.FloatTensor] = None,
+         subset_indices: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         qkv_inputs: Optional[Tuple] = None,  # For RetroMAE
+         padding_inputs: Optional[Tuple] = None,
+     ) -> Tuple[torch.Tensor, ...]:
+         # Multi head self attention
+         residual = hidden_states if qkv_inputs is None else qkv_inputs[0]
+         attention_outputs = self.attention(
+             hidden_states,
+             attention_bias,
+             rope_embeds,
+             attention_scale,
+             head_mask,
+             output_attentions=output_attentions,
+             qkv_inputs=qkv_inputs,
+             padding_inputs=padding_inputs,
+         )
+         hidden_states = attention_outputs[0]
+         if self.hidden_dropout is not None:
+             hidden_states = self.hidden_dropout(hidden_states)
+         hidden_states = residual + hidden_states
+
+         # In pretraining, after the attention of the last layer, we only need the masked tokens.
+         if subset_indices is not None:
+             hidden_states = hidden_states[subset_indices]
+
+         hidden_states = self.attn_ln(hidden_states)
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.mlp(hidden_states)
+         if self.hidden_dropout is not None:
+             hidden_states = self.hidden_dropout(hidden_states)
+         hidden_states = residual + hidden_states
+         hidden_states = self.mlp_ln(hidden_states)
+
+         # add self attentions if we output attention weights
+         outputs = (hidden_states,) + attention_outputs[1:]
+         return outputs
+
+
+ class NewEncoder(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.layer = nn.ModuleList([NewLayer(config) for _ in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = False
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_bias: Optional[torch.FloatTensor] = None,
+         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+         attention_scale: Optional[torch.FloatTensor] = None,
+         subset_indices: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         output_hidden_states: Optional[bool] = False,
+         return_dict: Optional[bool] = True,
+     ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attentions = () if output_attentions else None
+
+         for i, layer_module in enumerate(self.layer):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             if i >= len(self.layer) - 1:
+                 layer_subset_indices = subset_indices
+             else:
+                 layer_subset_indices = None
+
+             layer_head_mask = head_mask[i] if head_mask is not None else None
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     layer_module.__call__,
+                     hidden_states,
+                     attention_bias,
+                     rope_embeds,
+                     attention_scale,
+                     layer_subset_indices,
+                     layer_head_mask,
+                 )
+             else:
+                 layer_outputs = layer_module(
+                     hidden_states,
+                     attention_bias,
+                     rope_embeds,
+                     attention_scale,
+                     layer_subset_indices,
+                     layer_head_mask,
+                     output_attentions,
+                 )
+
+             hidden_states = layer_outputs[0]
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(
+                 v
+                 for v in [
+                     hidden_states,
+                     all_hidden_states,
+                     all_self_attentions,
+                 ]
+                 if v is not None
+             )
+         return BaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->New
+ class NewPooler(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+         self.activation = nn.Tanh()
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         # We "pool" the model by simply taking the hidden state corresponding
+         # to the first token.
+         first_token_tensor = hidden_states[:, 0]
+         pooled_output = self.dense(first_token_tensor)
+         pooled_output = self.activation(pooled_output)
+         return pooled_output
+
+
+ class NewPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = NewConfig
+     base_model_prefix = "new"
+     supports_gradient_checkpointing = True
+
+     def _init_weights(self, module):
+         """Initialize the weights"""
+         if isinstance(module, nn.Linear):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+
+ class NewModel(NewPreTrainedModel):
+     """
+     The bare New Model transformer outputting raw hidden-states without any specific head on top.
+     """
+
+     def __init__(self, config: NewConfig, add_pooling_layer=False):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = NewEmbeddings(config)
+         self.encoder = NewEncoder(config)
+
+         self.pooler = NewPooler(config) if add_pooling_layer else None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings.word_embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings.word_embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         length: Optional[List[int]] = None,
+         subset_indices: Optional[torch.LongTensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
+         r"""
+         length (`list` of `int` of length `batch_size`, *optional*):
+             Lengths of the unpadded sequences. If `None`, the padded `last_hidden_state` is returned.
+         subset_indices (`torch.LongTensor`, *optional*):
+             Indices of the tokens to keep after the attention of the last layer
+             (e.g. only the masked positions during pretraining).
+         unpad_inputs (`bool`, *optional*):
+             Whether to strip padding tokens before the encoder. Defaults to `self.config.unpad_inputs`
+             and requires xformers memory-efficient attention.
+         """
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         unpad_inputs = unpad_inputs if unpad_inputs is not None else self.config.unpad_inputs
+         output_padded = length is None
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is not None:
+             self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+             input_shape = input_ids.size()
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         # TODO: not used
+         # # Prepare head mask if needed
+         # # 1.0 in head_mask indicate we keep the head
+         # # attention_probs has shape bsz x n_heads x N x N
+         # # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+         # # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+         # head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+         # Get embeddings, may unpad them
+         (embedding_output, attention_mask, rope_embeds, length) = self.embeddings(
+             unpad_inputs,
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             length=length,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             inputs_embeds=inputs_embeds
+         )
+
+         batch_size, seq_length = input_shape
+
+         if unpad_inputs:
+             assert self.config.use_memory_efficient_attention
+             attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length)
+         else:
+             # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+             # ourselves in which case we just need to make it broadcastable to all heads.
+             attention_bias = self.get_extended_attention_mask(attention_mask, input_shape)
+             if self.config.use_memory_efficient_attention:
+                 # xformers expects the bias expanded to (batch, n_heads, seq_len, seq_len)
+                 # rather than broadcast from (batch, 1, 1, seq_len).
+                 attention_bias = attention_bias.expand(-1, self.config.num_attention_heads, seq_length, -1)
+
+         if self.config.logn_attention_scale:
+             # logn attention scale: log(input_len) / log(max_position_embeddings)
+             attention_scale = attention_mask.sum(1).log() / torch.tensor(self.config.max_position_embeddings).log()
+             # at inference time the logn scale needs to be clipped to a minimum of 1
+             if self.config.logn_attention_clip1:
+                 attention_scale.clip_(1)
+             attention_scale = attention_scale[:, None, None, None]
+         else:
+             attention_scale = None
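+         # Editor's note (illustration): with max_position_embeddings = 512 and an
+         # input of 1024 real tokens, the scale is log(1024) / log(512) = 10/9 ≈ 1.11,
+         # so queries are scaled up slightly for sequences longer than the training length.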
+
+         encoder_outputs = self.encoder(
+             embedding_output,
+             attention_bias=attention_bias,
+             rope_embeds=rope_embeds,
+             attention_scale=attention_scale,
+             subset_indices=subset_indices,
+             head_mask=head_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         sequence_output = encoder_outputs[0]
+         if unpad_inputs and output_padded:
+             indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+             sequence_output = pad_input(
+                 sequence_output.squeeze(), indices, batch_size, seq_length
+             )
+
+         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+         if not return_dict:
+             return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=sequence_output,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
+
+
+ class NewLMPredictionHead(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+         self.transform_act_fn = ACT2FN[config.hidden_act]
+         self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+         # The output weights are the same as the input embeddings, but there is
+         # an output-only bias for each token.
+         self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+
+     def forward(self, hidden_states):
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.transform_act_fn(hidden_states)
+         hidden_states = self.norm(hidden_states)
+         hidden_states = self.decoder(hidden_states)
+         return hidden_states
+
+
+ class NewForMaskedLM(NewPreTrainedModel):
+     _tied_weights_keys = ["lm_head.decoder.bias", "lm_head.decoder.weight"]
+
+     def __init__(self, config: NewConfig):
+         super().__init__(config)
+         self.new = NewModel(config, add_pooling_layer=False)
+         self.lm_head = NewLMPredictionHead(config)
+         self.loss_fct = nn.CrossEntropyLoss()
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head.decoder
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head.decoder = new_embeddings
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+             config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+             the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+         """
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         unpad_inputs = unpad_inputs if unpad_inputs is not None else self.config.unpad_inputs
+         if labels is None or not unpad_inputs:
+             length = None
+             subset_indices = None
+         else:
+             length = attention_mask.sum(-1).tolist()
+             labels = labels[attention_mask.bool()].unsqueeze(0)
+             subset_indices = labels > -100
+
+         outputs = self.new(
+             input_ids,
+             attention_mask=attention_mask,
+             length=length,
+             subset_indices=subset_indices,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             unpad_inputs=unpad_inputs,
+         )
+
+         sequence_output = outputs[0]
+         prediction_scores = self.lm_head(sequence_output)
+
+         masked_lm_loss = None
+         if labels is not None:
+             if subset_indices is None:
+                 mask = attention_mask.bool()
+                 prediction_scores = prediction_scores[mask]
+                 labels = labels[mask]
+             else:
+                 labels = labels[subset_indices]
+             masked_lm_loss = self.loss_fct(prediction_scores, labels)
+
+         if not return_dict:
+             output = (prediction_scores,) + outputs[2:]
+             return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+         return MaskedLMOutput(
+             loss=masked_lm_loss,
+             logits=prediction_scores,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class NewForSequenceClassification(NewPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.config = config
+
+         self.new = NewModel(config, add_pooling_layer=True)
+         classifier_dropout = (
+             config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+         )
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         outputs = self.new(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             unpad_inputs=unpad_inputs,
+         )
+
+         pooled_output = outputs[1]
+
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+
+         loss = None
+         if labels is not None:
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = nn.MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = nn.CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = nn.BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class NewForMultipleChoice(NewPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.new = NewModel(config, add_pooling_layer=True)
+         classifier_dropout = (
+             config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+         )
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, 1)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+             num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+             `input_ids` above)
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+         input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+         attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+         token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+         position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+         inputs_embeds = (
+             inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+             if inputs_embeds is not None
+             else None
+         )
+
+         outputs = self.new(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             unpad_inputs=unpad_inputs,
+         )
+
+         pooled_output = outputs[1]
+
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+         reshaped_logits = logits.view(-1, num_choices)
+
+         loss = None
+         if labels is not None:
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(reshaped_logits, labels)
+
+         if not return_dict:
+             output = (reshaped_logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return MultipleChoiceModelOutput(
+             loss=loss,
+             logits=reshaped_logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class NewForTokenClassification(NewPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+
+         self.new = NewModel(config, add_pooling_layer=False)
+         classifier_dropout = (
+             config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+         )
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         outputs = self.new(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             unpad_inputs=unpad_inputs,
+         )
+
+         sequence_output = outputs[0]
+
+         sequence_output = self.dropout(sequence_output)
+         logits = self.classifier(sequence_output)
+
+         loss = None
+         if labels is not None:
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class NewForQuestionAnswering(NewPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+
+         self.new = NewModel(config, add_pooling_layer=False)
+         self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         start_positions: Optional[torch.Tensor] = None,
+         end_positions: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         unpad_inputs: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+         r"""
+         start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for position (index) of the start of the labelled span for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+             are not taken into account for computing the loss.
+         end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for position (index) of the end of the labelled span for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+             are not taken into account for computing the loss.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         outputs = self.new(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             unpad_inputs=unpad_inputs,
+         )
+
+         sequence_output = outputs[0]
+
+         logits = self.qa_outputs(sequence_output)
+         start_logits, end_logits = logits.split(1, dim=-1)
+         start_logits = start_logits.squeeze(-1).contiguous()
+         end_logits = end_logits.squeeze(-1).contiguous()
+
+         total_loss = None
+         if start_positions is not None and end_positions is not None:
+             # If we are on multi-GPU, remove the extra dimension
+             if len(start_positions.size()) > 1:
+                 start_positions = start_positions.squeeze(-1)
+             if len(end_positions.size()) > 1:
+                 end_positions = end_positions.squeeze(-1)
+             # sometimes the start/end positions are outside our model inputs; we ignore these terms
+             ignored_index = start_logits.size(1)
+             start_positions = start_positions.clamp(0, ignored_index)
+             end_positions = end_positions.clamp(0, ignored_index)
+
+             loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
+             start_loss = loss_fct(start_logits, start_positions)
+             end_loss = loss_fct(end_logits, end_positions)
+             total_loss = (start_loss + end_loss) / 2
+
+         if not return_dict:
+             output = (start_logits, end_logits) + outputs[2:]
+             return ((total_loss,) + output) if total_loss is not None else output
+
+         return QuestionAnsweringModelOutput(
+             loss=total_loss,
+             start_logits=start_logits,
+             end_logits=end_logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
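+
+
+ # Editor's note, a usage sketch (assumes this file ships in a Hugging Face model
+ # repo with a matching config.json; the model id below is hypothetical):
+ #     from transformers import AutoModel, AutoTokenizer
+ #     tokenizer = AutoTokenizer.from_pretrained("some-org/some-gte-model")
+ #     model = AutoModel.from_pretrained("some-org/some-gte-model", trust_remote_code=True)
+ #     batch = tokenizer(["hello world"], return_tensors="pt")
+ #     last_hidden = model(**batch).last_hidden_state  # [bs, seq_len, hidden]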