""" PyTorch DeBERTa-v2 model.""" |
|
|
|
import math |
|
from collections.abc import Sequence |
|
from typing import Optional, Tuple, Union |
|
|
|
import torch |
|
import torch.utils.checkpoint |
|
from torch import nn |
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss |
|
|
|
from transformers.activations import ACT2FN |
|
from transformers.modeling_outputs import ( |
|
    BaseModelOutput,
    CausalLMOutput,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
|
QuestionAnsweringModelOutput, |
|
SequenceClassifierOutput, |
|
TokenClassifierOutput, |
|
) |
|
from transformers.modeling_utils import PreTrainedModel |
|
from transformers.pytorch_utils import softmax_backward_data |
|
from transformers.utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
|
from .configuration_deberta import DebertaV2Config |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
|
|
class ContextPooler(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) |
|
self.dropout = StableDropout(config.pooler_dropout) |
|
self.config = config |
|
|
|
def forward(self, hidden_states): |
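        # We "pool" the model by simply taking the hidden state corresponding to the first token.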
|
|
|
|
|
|
|
context_token = hidden_states[:, 0] |
|
context_token = self.dropout(context_token) |
|
pooled_output = self.dense(context_token) |
|
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) |
|
return pooled_output |
|
|
|
@property |
|
def output_dim(self): |
|
return self.config.hidden_size |
|
|
|
|
|
|
|
class XSoftmax(torch.autograd.Function): |
|
""" |
|
Masked Softmax which is optimized for saving memory |
|
|
|
Args: |
|
        input (`torch.tensor`): The input tensor on which softmax will be applied.
        mask (`torch.IntTensor`):
            The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
        dim (int): The dimension along which softmax will be applied.
|
|
|
Example: |
|
|
|
```python |
|
>>> import torch |
|
>>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax |
|
|
|
>>> # Make a tensor |
|
>>> x = torch.randn([4, 20, 100]) |
|
|
|
>>> # Create a mask |
|
>>> mask = (x > 0).int() |
|
|
|
>>> # Specify the dimension to apply softmax |
|
>>> dim = -1 |
|
|
|
>>> y = XSoftmax.apply(x, mask, dim) |
|
```""" |
|
|
|
@staticmethod |
|
def forward(self, input, mask, dim): |
|
self.dim = dim |
|
rmask = ~(mask.to(torch.bool)) |
|
|
|
output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) |
|
output = torch.softmax(output, self.dim) |
|
output.masked_fill_(rmask, 0) |
|
self.save_for_backward(output) |
|
return output |
|
|
|
@staticmethod |
|
def backward(self, grad_output): |
|
(output,) = self.saved_tensors |
|
inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output) |
|
return inputGrad, None, None |
|
|
|
@staticmethod |
|
def symbolic(g, self, mask, dim): |
|
import torch.onnx.symbolic_helper as sym_help |
|
from torch.onnx.symbolic_opset9 import masked_fill, softmax |
|
|
|
mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) |
|
r_mask = g.op( |
|
"Cast", |
|
g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), |
|
to_i=sym_help.cast_pytorch_to_onnx["Bool"], |
|
) |
|
output = masked_fill( |
|
g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min)) |
|
) |
|
output = softmax(g, output, dim) |
|
return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) |
|
|
|
|
|
|
|
class DropoutContext(object): |
|
def __init__(self): |
|
self.dropout = 0 |
|
self.mask = None |
|
self.scale = 1 |
|
self.reuse_mask = True |
|
|
|
|
|
|
|
def get_mask(input, local_context): |
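    # `local_context` is either a plain dropout probability or a DropoutContext; when it is a context, the sampled
    # mask is cached on it so that later calls can reuse the same dropout pattern.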
|
if not isinstance(local_context, DropoutContext): |
|
dropout = local_context |
|
mask = None |
|
else: |
|
dropout = local_context.dropout |
|
dropout *= local_context.scale |
|
mask = local_context.mask if local_context.reuse_mask else None |
|
|
|
if dropout > 0 and mask is None: |
|
mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) |
|
|
|
if isinstance(local_context, DropoutContext): |
|
if local_context.mask is None: |
|
local_context.mask = mask |
|
|
|
return mask, dropout |
|
|
|
|
|
|
|
class XDropout(torch.autograd.Function): |
|
"""Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" |
|
|
|
@staticmethod |
|
def forward(ctx, input, local_ctx): |
|
mask, dropout = get_mask(input, local_ctx) |
|
ctx.scale = 1.0 / (1 - dropout) |
|
if dropout > 0: |
|
ctx.save_for_backward(mask) |
|
return input.masked_fill(mask, 0) * ctx.scale |
|
else: |
|
return input |
|
|
|
@staticmethod |
|
def backward(ctx, grad_output): |
|
if ctx.scale > 1: |
|
(mask,) = ctx.saved_tensors |
|
return grad_output.masked_fill(mask, 0) * ctx.scale, None |
|
else: |
|
return grad_output, None |
|
|
|
@staticmethod |
|
def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: |
|
from torch.onnx import symbolic_opset12 |
|
|
|
dropout_p = local_ctx |
|
if isinstance(local_ctx, DropoutContext): |
|
dropout_p = local_ctx.dropout |
|
|
|
train = True |
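        # StableDropout only routes through XDropout in training mode, so training is hard-coded for the exported
        # ONNX Dropout node (the symbolic below targets opset 12).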
|
|
|
|
|
|
|
|
|
|
|
|
|
return symbolic_opset12.dropout(g, input, dropout_p, train) |
|
|
|
|
|
|
|
class StableDropout(nn.Module): |
|
""" |
|
Optimized dropout module for stabilizing the training |
|
|
|
Args: |
|
        drop_prob (float): the dropout probability
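
    Example (a minimal illustrative sketch):

    ```python
    >>> import torch
    >>> drop = StableDropout(0.1)
    >>> _ = drop.train()
    >>> y = drop(torch.ones(2, 4))  # kept elements are rescaled to 1 / 0.9, dropped elements become 0
    ```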
|
""" |
|
|
|
def __init__(self, drop_prob): |
|
super().__init__() |
|
self.drop_prob = drop_prob |
|
self.count = 0 |
|
self.context_stack = None |
|
|
|
def forward(self, x): |
|
""" |
|
Call the module |
|
|
|
Args: |
|
            x (`torch.tensor`): The input tensor on which to apply dropout
|
""" |
|
if self.training and self.drop_prob > 0: |
|
return XDropout.apply(x, self.get_context()) |
|
return x |
|
|
|
def clear_context(self): |
|
self.count = 0 |
|
self.context_stack = None |
|
|
|
def init_context(self, reuse_mask=True, scale=1): |
|
if self.context_stack is None: |
|
self.context_stack = [] |
|
self.count = 0 |
|
for c in self.context_stack: |
|
c.reuse_mask = reuse_mask |
|
c.scale = scale |
|
|
|
def get_context(self): |
|
if self.context_stack is not None: |
|
if self.count >= len(self.context_stack): |
|
self.context_stack.append(DropoutContext()) |
|
ctx = self.context_stack[self.count] |
|
ctx.dropout = self.drop_prob |
|
self.count += 1 |
|
return ctx |
|
else: |
|
return self.drop_prob |
|
|
|
|
|
|
|
class DebertaV2SelfOutput(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
|
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) |
|
self.dropout = StableDropout(config.hidden_dropout_prob) |
|
|
|
def forward(self, hidden_states, input_tensor): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states + input_tensor) |
|
return hidden_states |
|
|
|
|
|
|
|
class DebertaV2Attention(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.self = DisentangledSelfAttention(config) |
|
self.output = DebertaV2SelfOutput(config) |
|
self.config = config |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask, |
|
output_attentions=False, |
|
query_states=None, |
|
relative_pos=None, |
|
rel_embeddings=None, |
|
): |
|
self_output = self.self( |
|
hidden_states, |
|
attention_mask, |
|
output_attentions, |
|
query_states=query_states, |
|
relative_pos=relative_pos, |
|
rel_embeddings=rel_embeddings, |
|
) |
|
if output_attentions: |
|
self_output, att_matrix = self_output |
|
if query_states is None: |
|
query_states = hidden_states |
|
attention_output = self.output(self_output, query_states) |
|
|
|
if output_attentions: |
|
return (attention_output, att_matrix) |
|
else: |
|
return attention_output |
|
|
|
|
|
|
|
class DebertaV2Intermediate(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.intermediate_size) |
|
if isinstance(config.hidden_act, str): |
|
self.intermediate_act_fn = ACT2FN[config.hidden_act] |
|
else: |
|
self.intermediate_act_fn = config.hidden_act |
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.intermediate_act_fn(hidden_states) |
|
return hidden_states |
|
|
|
|
|
|
|
class DebertaV2Output(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.dense = nn.Linear(config.intermediate_size, config.hidden_size) |
|
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) |
|
self.dropout = StableDropout(config.hidden_dropout_prob) |
|
self.config = config |
|
|
|
def forward(self, hidden_states, input_tensor): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states + input_tensor) |
|
return hidden_states |
|
|
|
|
|
|
|
class DebertaV2Layer(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.attention = DebertaV2Attention(config) |
|
self.intermediate = DebertaV2Intermediate(config) |
|
self.output = DebertaV2Output(config) |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask, |
|
query_states=None, |
|
relative_pos=None, |
|
rel_embeddings=None, |
|
output_attentions=False, |
|
): |
|
attention_output = self.attention( |
|
hidden_states, |
|
attention_mask, |
|
output_attentions=output_attentions, |
|
query_states=query_states, |
|
relative_pos=relative_pos, |
|
rel_embeddings=rel_embeddings, |
|
) |
|
if output_attentions: |
|
attention_output, att_matrix = attention_output |
|
intermediate_output = self.intermediate(attention_output) |
|
layer_output = self.output(intermediate_output, attention_output) |
|
if output_attentions: |
|
return (layer_output, att_matrix) |
|
else: |
|
return layer_output |
|
|
|
|
|
class ConvLayer(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
kernel_size = getattr(config, "conv_kernel_size", 3) |
|
groups = getattr(config, "conv_groups", 1) |
|
self.conv_act = getattr(config, "conv_act", "tanh") |
|
self.conv = nn.Conv1d( |
|
config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups |
|
) |
|
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) |
|
self.dropout = StableDropout(config.hidden_dropout_prob) |
|
self.config = config |
|
|
|
def forward(self, hidden_states, residual_states, input_mask): |
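        # nn.Conv1d expects (batch, channels, seq), so permute the hidden states in and back out.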
|
out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous() |
|
rmask = (1 - input_mask).bool() |
|
out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0) |
|
out = ACT2FN[self.conv_act](self.dropout(out)) |
|
|
|
layer_norm_input = residual_states + out |
|
output = self.LayerNorm(layer_norm_input).to(layer_norm_input) |
|
|
|
if input_mask is None: |
|
output_states = output |
|
else: |
|
if input_mask.dim() != layer_norm_input.dim(): |
|
if input_mask.dim() == 4: |
|
input_mask = input_mask.squeeze(1).squeeze(1) |
|
input_mask = input_mask.unsqueeze(2) |
|
|
|
input_mask = input_mask.to(output.dtype) |
|
output_states = output * input_mask |
|
|
|
return output_states |
|
|
|
|
|
class DebertaV2Encoder(nn.Module): |
|
"""Modified BertEncoder with relative position bias support""" |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
|
|
self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)]) |
|
self.relative_attention = getattr(config, "relative_attention", False) |
|
|
|
if self.relative_attention: |
|
self.max_relative_positions = getattr(config, "max_relative_positions", -1) |
|
if self.max_relative_positions < 1: |
|
self.max_relative_positions = config.max_position_embeddings |
|
|
|
self.position_buckets = getattr(config, "position_buckets", -1) |
|
pos_ebd_size = self.max_relative_positions * 2 |
|
|
|
if self.position_buckets > 0: |
|
pos_ebd_size = self.position_buckets * 2 |
|
|
|
self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size) |
|
|
|
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")] |
|
|
|
if "layer_norm" in self.norm_rel_ebd: |
|
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True) |
|
|
|
self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None |
|
self.gradient_checkpointing = False |
|
|
|
def get_rel_embedding(self): |
|
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None |
|
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd): |
|
rel_embeddings = self.LayerNorm(rel_embeddings) |
|
return rel_embeddings |
|
|
|
def get_attention_mask(self, attention_mask): |
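        # Expand a 2D [batch, seq] padding mask to [batch, 1, seq, seq], where entry (i, j) is 1 only if both token i
        # and token j are real (non-padding) tokens.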
|
if attention_mask.dim() <= 2: |
|
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) |
|
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) |
|
elif attention_mask.dim() == 3: |
|
attention_mask = attention_mask.unsqueeze(1) |
|
|
|
return attention_mask |
|
|
|
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): |
|
if self.relative_attention and relative_pos is None: |
|
q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) |
|
relative_pos = build_relative_position( |
|
q, |
|
hidden_states.size(-2), |
|
bucket_size=self.position_buckets, |
|
max_position=self.max_relative_positions, |
|
device=hidden_states.device, |
|
) |
|
return relative_pos |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask, |
|
output_hidden_states=True, |
|
output_attentions=False, |
|
query_states=None, |
|
relative_pos=None, |
|
return_dict=True, |
|
): |
|
if attention_mask.dim() <= 2: |
|
input_mask = attention_mask |
|
else: |
|
input_mask = attention_mask.sum(-2) > 0 |
|
attention_mask = self.get_attention_mask(attention_mask) |
|
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) |
|
|
|
all_hidden_states = () if output_hidden_states else None |
|
all_attentions = () if output_attentions else None |
|
|
|
if isinstance(hidden_states, Sequence): |
|
next_kv = hidden_states[0] |
|
else: |
|
next_kv = hidden_states |
|
rel_embeddings = self.get_rel_embedding() |
|
output_states = next_kv |
|
for i, layer_module in enumerate(self.layer): |
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (output_states,) |
|
|
|
if self.gradient_checkpointing and self.training: |
|
output_states = self._gradient_checkpointing_func( |
|
layer_module.__call__, |
|
next_kv, |
|
attention_mask, |
|
query_states, |
|
relative_pos, |
|
rel_embeddings, |
|
output_attentions, |
|
) |
|
else: |
|
output_states = layer_module( |
|
next_kv, |
|
attention_mask, |
|
query_states=query_states, |
|
relative_pos=relative_pos, |
|
rel_embeddings=rel_embeddings, |
|
output_attentions=output_attentions, |
|
) |
|
|
|
if output_attentions: |
|
output_states, att_m = output_states |
|
|
|
if i == 0 and self.conv is not None: |
|
output_states = self.conv(hidden_states, output_states, input_mask) |
|
|
|
if query_states is not None: |
|
query_states = output_states |
|
if isinstance(hidden_states, Sequence): |
|
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None |
|
else: |
|
next_kv = output_states |
|
|
|
if output_attentions: |
|
all_attentions = all_attentions + (att_m,) |
|
|
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (output_states,) |
|
|
|
if not return_dict: |
|
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None) |
|
return BaseModelOutput( |
|
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions |
|
) |
|
|
|
|
|
def make_log_bucket_position(relative_pos, bucket_size, max_position): |
|
sign = torch.sign(relative_pos) |
|
mid = bucket_size // 2 |
|
    # Positions within half a bucket of zero keep their exact relative distance; farther positions are mapped to
    # logarithmically spaced buckets, capped at max_position - 1.
    abs_pos = torch.where(
        (relative_pos < mid) & (relative_pos > -mid),
        mid - 1,
        torch.abs(relative_pos).clamp(max=max_position - 1),
    )
    log_pos = torch.ceil(torch.log(abs_pos / mid) / math.log((max_position - 1) / mid) * (mid - 1)).int() + mid
    bucket_pos = torch.where(abs_pos <= mid, relative_pos, log_pos * sign).long()
|
return bucket_pos |
|
|
|
|
|
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None): |
|
""" |
|
Build relative position according to the query and key |
|
|
|
    We assume the absolute position of query \\(P_q\\) ranges from (0, query_size) and the absolute position of key
    \\(P_k\\) ranges from (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)
|
|
|
Args: |
|
query_size (int): the length of query |
|
key_size (int): the length of key |
|
bucket_size (int): the size of position bucket |
|
max_position (int): the maximum allowed absolute position |
|
device (`torch.device`): the device on which tensors will be created. |
|
|
|
Return: |
|
`torch.LongTensor`: A tensor with shape [1, query_size, key_size] |
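
    Example (an illustrative sketch of the default, unbucketed output for a 3-token query and key):

    ```python
    >>> build_relative_position(3, 3)
    tensor([[[ 0, -1, -2],
             [ 1,  0, -1],
             [ 2,  1,  0]]])
    ```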
|
""" |
|
|
|
q_ids = torch.arange(0, query_size, device=device) |
|
k_ids = torch.arange(0, key_size, device=device) |
|
rel_pos_ids = q_ids[:, None] - k_ids[None, :] |
|
if bucket_size > 0 and max_position > 0: |
|
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position) |
|
rel_pos_ids = rel_pos_ids.to(torch.long) |
|
rel_pos_ids = rel_pos_ids[:query_size, :] |
|
rel_pos_ids = rel_pos_ids.unsqueeze(0) |
|
return rel_pos_ids |
|
|
|
|
|
@torch.jit.script |
|
|
|
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): |
|
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]) |
|
|
|
|
|
@torch.jit.script |
|
|
|
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): |
|
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)]) |
|
|
|
|
|
@torch.jit.script |
|
|
|
def pos_dynamic_expand(pos_index, p2c_att, key_layer): |
|
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) |
|
|
|
|
|
class DisentangledSelfAttention(nn.Module): |
|
""" |
|
Disentangled self-attention module |
|
|
|
Parameters: |
|
config (`DebertaV2Config`): |
|
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*; for more details, please refer to [`DebertaV2Config`]
|
|
|
""" |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
if config.hidden_size % config.num_attention_heads != 0: |
|
raise ValueError( |
|
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " |
|
f"heads ({config.num_attention_heads})" |
|
) |
|
self.num_attention_heads = config.num_attention_heads |
|
_attention_head_size = config.hidden_size // config.num_attention_heads |
|
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size) |
|
self.all_head_size = self.num_attention_heads * self.attention_head_size |
|
self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) |
|
self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) |
|
self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) |
|
|
|
self.share_att_key = getattr(config, "share_att_key", False) |
|
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] |
|
self.relative_attention = getattr(config, "relative_attention", False) |
|
|
|
if self.relative_attention: |
|
self.position_buckets = getattr(config, "position_buckets", -1) |
|
self.max_relative_positions = getattr(config, "max_relative_positions", -1) |
|
if self.max_relative_positions < 1: |
|
self.max_relative_positions = config.max_position_embeddings |
|
self.pos_ebd_size = self.max_relative_positions |
|
if self.position_buckets > 0: |
|
self.pos_ebd_size = self.position_buckets |
|
|
|
self.pos_dropout = StableDropout(config.hidden_dropout_prob) |
|
|
|
if not self.share_att_key: |
|
if "c2p" in self.pos_att_type: |
|
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) |
|
if "p2c" in self.pos_att_type: |
|
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size) |
|
|
|
self.dropout = StableDropout(config.attention_probs_dropout_prob) |
|
|
|
def transpose_for_scores(self, x, attention_heads): |
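        # Reshape (batch, seq, all_head_size) -> (batch * heads, seq, head_size) so attention can use a single bmm.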
|
new_x_shape = x.size()[:-1] + (attention_heads, -1) |
|
x = x.view(new_x_shape) |
|
return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1)) |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask, |
|
output_attentions=False, |
|
query_states=None, |
|
relative_pos=None, |
|
rel_embeddings=None, |
|
): |
|
""" |
|
Call the module |
|
|
|
Args: |
|
            hidden_states (`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer; they serve as the Q, K and V
                in *Attention(Q,K,V)*

            attention_mask (`torch.BoolTensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size and *N* is the maximum
                sequence length, in which element [i,j] = *1* means the *i* th token in the input can attend to the
                *j* th token.

            output_attentions (`bool`, *optional*):
                Whether to return the attention matrix.
|
|
|
query_states (`torch.FloatTensor`, optional): |
|
The *Q* state in *Attention(Q,K,V)*. |
|
|
|
relative_pos (`torch.LongTensor`): |
|
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with |
|
values ranging in [*-max_relative_positions*, *max_relative_positions*]. |
|
|
|
rel_embeddings (`torch.FloatTensor`): |
|
The embedding of relative distances. It's a tensor of shape [\\(2 \\times |
|
\\text{max_relative_positions}\\), *hidden_size*]. |
|
|
|
|
|
""" |
|
if query_states is None: |
|
query_states = hidden_states |
|
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads) |
|
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads) |
|
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads) |
|
|
|
rel_att = None |
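        # The attention score is a sum of up to three terms (content-to-content, content-to-position,
        # position-to-content), so scores are scaled by 1 / sqrt(d * scale_factor) rather than 1 / sqrt(d).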
|
|
|
scale_factor = 1 |
|
if "c2p" in self.pos_att_type: |
|
scale_factor += 1 |
|
if "p2c" in self.pos_att_type: |
|
scale_factor += 1 |
|
scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) |
|
attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype)) |
|
if self.relative_attention: |
|
rel_embeddings = self.pos_dropout(rel_embeddings) |
|
rel_att = self.disentangled_attention_bias( |
|
query_layer, key_layer, relative_pos, rel_embeddings, scale_factor |
|
) |
|
|
|
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        attention_scores = attention_scores.view(
            -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
        )
|
|
|
|
|
attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) |
|
attention_probs = self.dropout(attention_probs) |
|
context_layer = torch.bmm( |
|
attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer |
|
) |
|
context_layer = ( |
|
context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1)) |
|
.permute(0, 2, 1, 3) |
|
.contiguous() |
|
) |
|
new_context_layer_shape = context_layer.size()[:-2] + (-1,) |
|
context_layer = context_layer.view(new_context_layer_shape) |
|
if output_attentions: |
|
return (context_layer, attention_probs) |
|
else: |
|
return context_layer |
|
|
|
def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): |
|
if relative_pos is None: |
|
q = query_layer.size(-2) |
|
relative_pos = build_relative_position( |
|
q, |
|
key_layer.size(-2), |
|
bucket_size=self.position_buckets, |
|
max_position=self.max_relative_positions, |
|
device=query_layer.device, |
|
) |
|
if relative_pos.dim() == 2: |
|
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) |
|
elif relative_pos.dim() == 3: |
|
relative_pos = relative_pos.unsqueeze(1) |
|
|
|
elif relative_pos.dim() != 4: |
|
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}") |
|
|
|
att_span = self.pos_ebd_size |
|
relative_pos = relative_pos.long().to(query_layer.device) |
|
|
|
rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0) |
|
if self.share_att_key: |
|
pos_query_layer = self.transpose_for_scores( |
|
self.query_proj(rel_embeddings), self.num_attention_heads |
|
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) |
|
pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat( |
|
query_layer.size(0) // self.num_attention_heads, 1, 1 |
|
) |
|
else: |
|
if "c2p" in self.pos_att_type: |
|
pos_key_layer = self.transpose_for_scores( |
|
self.pos_key_proj(rel_embeddings), self.num_attention_heads |
|
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) |
|
if "p2c" in self.pos_att_type: |
|
pos_query_layer = self.transpose_for_scores( |
|
self.pos_query_proj(rel_embeddings), self.num_attention_heads |
|
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) |
|
|
|
score = 0 |
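        # Content-to-position ("c2p") scores each content query against the relative-position key embeddings and
        # gathers, for every (query, key) pair, the column matching their relative distance; position-to-content
        # ("p2c") is the symmetric term computed from the key side.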
|
|
|
if "c2p" in self.pos_att_type: |
|
scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor) |
|
c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2)) |
|
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) |
|
c2p_att = torch.gather( |
|
c2p_att, |
|
dim=-1, |
|
index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]), |
|
) |
|
score += c2p_att / scale.to(dtype=c2p_att.dtype) |
|
|
|
|
|
if "p2c" in self.pos_att_type: |
|
scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) |
|
if key_layer.size(-2) != query_layer.size(-2): |
|
r_pos = build_relative_position( |
|
key_layer.size(-2), |
|
key_layer.size(-2), |
|
bucket_size=self.position_buckets, |
|
max_position=self.max_relative_positions, |
|
device=query_layer.device, |
|
) |
|
r_pos = r_pos.unsqueeze(0) |
|
else: |
|
r_pos = relative_pos |
|
|
|
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) |
|
p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2)) |
|
p2c_att = torch.gather( |
|
p2c_att, |
|
dim=-1, |
|
index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]), |
|
).transpose(-1, -2) |
|
score += p2c_att / scale.to(dtype=p2c_att.dtype) |
|
|
|
return score |
|
|
|
|
|
|
|
class DebertaV2Embeddings(nn.Module): |
|
"""Construct the embeddings from word, position and token_type embeddings.""" |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
pad_token_id = getattr(config, "pad_token_id", 0) |
|
self.embedding_size = getattr(config, "embedding_size", config.hidden_size) |
|
self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) |
|
|
|
self.position_biased_input = getattr(config, "position_biased_input", True) |
|
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) |
|
|
|
if config.type_vocab_size > 0: |
|
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) |
|
|
|
if self.embedding_size != config.hidden_size: |
|
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) |
|
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) |
|
self.dropout = StableDropout(config.hidden_dropout_prob) |
|
self.config = config |
|
|
|
|
|
self.register_buffer( |
|
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False |
|
) |
|
|
|
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): |
|
if input_ids is not None: |
|
input_shape = input_ids.size() |
|
else: |
|
input_shape = inputs_embeds.size()[:-1] |
|
|
|
seq_length = input_shape[1] |
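
        # If the sequence is longer than the learned position table, the oldest tokens share position 0 so that the
        # most recent `max_position_embeddings` tokens keep their usual absolute positions.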
|
|
|
if position_ids is None: |
|
if seq_length > self.position_ids.size(1): |
|
position_ids = torch.cat([ |
|
torch.zeros(1, seq_length - self.position_ids.size(1), dtype=self.position_ids.dtype, device=self.position_ids.device), |
|
self.position_ids |
|
], dim=1) |
|
else: |
|
position_ids = self.position_ids[:, :seq_length] |
|
elif position_ids.size(1) > self.position_ids.size(1): |
|
position_ids = (position_ids + self.position_ids.size(1) - position_ids.size(1)).clamp(min=0) |
|
|
|
if token_type_ids is None: |
|
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) |
|
|
|
if inputs_embeds is None: |
|
inputs_embeds = self.word_embeddings(input_ids) |
|
|
|
if self.position_embeddings is not None: |
|
position_embeddings = self.position_embeddings(position_ids.long()) |
|
else: |
|
position_embeddings = torch.zeros_like(inputs_embeds) |
|
|
|
embeddings = inputs_embeds |
|
if self.position_biased_input: |
|
embeddings += position_embeddings |
|
if self.config.type_vocab_size > 0: |
|
token_type_embeddings = self.token_type_embeddings(token_type_ids) |
|
embeddings += token_type_embeddings |
|
|
|
if self.embedding_size != self.config.hidden_size: |
|
embeddings = self.embed_proj(embeddings) |
|
|
|
embeddings = self.LayerNorm(embeddings) |
|
|
|
if mask is not None: |
|
if mask.dim() != embeddings.dim(): |
|
if mask.dim() == 4: |
|
mask = mask.squeeze(1).squeeze(1) |
|
mask = mask.unsqueeze(2) |
|
mask = mask.to(embeddings.dtype) |
|
|
|
embeddings = embeddings * mask |
|
|
|
embeddings = self.dropout(embeddings) |
|
return embeddings, position_embeddings |
|
|
|
|
|
|
|
class DebertaV2PreTrainedModel(PreTrainedModel): |
|
""" |
|
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
|
models. |
|
""" |
|
config_class = DebertaV2Config |
|
base_model_prefix = "deberta" |
|
supports_gradient_checkpointing = True |
|
|
|
def _init_weights(self, module): |
|
"""Initialize the weights.""" |
|
if isinstance(module, nn.Linear): |
|
|
|
|
|
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
|
if module.bias is not None: |
|
module.bias.data.zero_() |
|
elif isinstance(module, nn.Embedding): |
|
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
|
if module.padding_idx is not None: |
|
module.weight.data[module.padding_idx].zero_() |
|
|
|
|
|
DEBERTA_START_DOCSTRING = r""" |
|
The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled |
|
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
|
|
|
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
|
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
|
and behavior. |
|
|
|
|
|
Parameters: |
|
config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model. |
|
Initializing with a config file does not load the weights associated with the model, only the |
|
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
|
""" |
|
|
|
DEBERTA_INPUTS_DOCSTRING = r""" |
|
Args: |
|
input_ids (`torch.LongTensor` of shape `({0})`): |
|
Indices of input sequence tokens in the vocabulary. |
|
|
|
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
|
[`PreTrainedTokenizer.__call__`] for details. |
|
|
|
[What are input IDs?](../glossary#input-ids) |
|
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): |
|
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
|
|
|
- 1 for tokens that are **not masked**, |
|
- 0 for tokens that are **masked**. |
|
|
|
[What are attention masks?](../glossary#attention-mask) |
|
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
|
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, |
|
1]`: |
|
|
|
- 0 corresponds to a *sentence A* token, |
|
- 1 corresponds to a *sentence B* token. |
|
|
|
[What are token type IDs?](../glossary#token-type-ids) |
|
position_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
|
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
|
config.max_position_embeddings - 1]`. |
|
|
|
[What are position IDs?](../glossary#position-ids) |
|
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): |
|
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
|
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the |
|
model's internal embedding lookup matrix. |
|
output_attentions (`bool`, *optional*): |
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
|
tensors for more detail. |
|
output_hidden_states (`bool`, *optional*): |
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
|
more detail. |
|
return_dict (`bool`, *optional*): |
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
|
""" |
|
|
|
|
|
@add_start_docstrings( |
|
"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", |
|
DEBERTA_START_DOCSTRING, |
|
) |
|
|
|
class DebertaV2Model(DebertaV2PreTrainedModel): |
|
def __init__(self, config): |
|
super().__init__(config) |
|
|
|
self.embeddings = DebertaV2Embeddings(config) |
|
self.encoder = DebertaV2Encoder(config) |
|
self.z_steps = 4 |
|
self.config = config |
|
|
|
self.post_init() |
|
|
|
def get_input_embeddings(self): |
|
return self.embeddings.word_embeddings |
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
self.embeddings.word_embeddings = new_embeddings |
|
|
|
def _prune_heads(self, heads_to_prune): |
|
""" |
|
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
        base class PreTrainedModel.
|
""" |
|
raise NotImplementedError("The prune function is not implemented in DeBERTa model.") |
|
|
|
def forward( |
|
self, |
|
input_ids: Optional[torch.Tensor] = None, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
token_type_ids: Optional[torch.Tensor] = None, |
|
position_ids: Optional[torch.Tensor] = None, |
|
inputs_embeds: Optional[torch.Tensor] = None, |
|
output_attentions: Optional[bool] = None, |
|
output_hidden_states: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> Union[Tuple, BaseModelOutput]: |
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
output_hidden_states = ( |
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
) |
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
|
elif input_ids is not None: |
|
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
|
input_shape = input_ids.size() |
|
elif inputs_embeds is not None: |
|
input_shape = inputs_embeds.size()[:-1] |
|
else: |
|
raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
|
device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
|
if attention_mask is None: |
|
attention_mask = torch.ones(input_shape, device=device) |
|
|
|
if token_type_ids is None: |
|
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) |
|
|
|
embedding_output, position_embeddings = self.embeddings( |
|
input_ids=input_ids, |
|
token_type_ids=token_type_ids, |
|
position_ids=position_ids, |
|
mask=attention_mask, |
|
inputs_embeds=inputs_embeds, |
|
) |
|
|
|
encoder_outputs = self.encoder( |
|
embedding_output, |
|
attention_mask, |
|
output_hidden_states=True, |
|
output_attentions=output_attentions, |
|
return_dict=return_dict, |
|
) |
|
encoded_layers = list(encoder_outputs[1]) |
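        # Enhanced mask decoder: run the top encoder layer `z_steps` extra times, injecting absolute position
        # information through the query states on top of the second-to-last hidden states.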
|
|
|
|
|
|
|
if self.z_steps > 0: |
|
hidden_states = encoded_layers[-2] |
|
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)] |
|
query_states = position_embeddings + encoded_layers[-2] |
|
rel_embeddings = self.encoder.get_rel_embedding() |
|
attention_mask = self.encoder.get_attention_mask(attention_mask) |
|
rel_pos = self.encoder.get_rel_pos(embedding_output) |
|
for layer in layers: |
|
query_states = layer( |
|
hidden_states, |
|
attention_mask, |
|
output_attentions=False, |
|
query_states=query_states, |
|
relative_pos=rel_pos, |
|
rel_embeddings=rel_embeddings, |
|
) |
|
encoded_layers.append(query_states) |
|
|
|
sequence_output = encoded_layers[-1] |
|
|
|
if not return_dict: |
|
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :] |
|
|
|
return BaseModelOutput( |
|
last_hidden_state=sequence_output, |
|
hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, |
|
attentions=encoder_outputs.attentions, |
|
) |
|
|
|
|
|
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) |
|
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel): |
|
_tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] |
|
|
|
def __init__(self, config): |
|
super().__init__(config) |
|
|
|
self.deberta = DebertaV2Model(config) |
|
self.cls = DebertaV2OnlyMLMHead(config) |
|
|
|
self.verbose = False |
|
|
|
|
|
self.post_init() |
|
|
|
def get_output_embeddings(self): |
|
return self.cls.predictions.decoder |
|
|
|
def set_output_embeddings(self, new_embeddings): |
|
self.cls.predictions.decoder = new_embeddings |
|
|
|
def forward( |
|
self, |
|
input_ids: Optional[torch.Tensor] = None, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
token_type_ids: Optional[torch.Tensor] = None, |
|
position_ids: Optional[torch.Tensor] = None, |
|
inputs_embeds: Optional[torch.Tensor] = None, |
|
labels: Optional[torch.Tensor] = None, |
|
output_attentions: Optional[bool] = None, |
|
output_hidden_states: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> Union[Tuple, MaskedLMOutput]: |
|
r""" |
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., |
|
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the |
|
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` |
|
""" |
|
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
if self.verbose: |
|
for i in input_ids[0, :].tolist(): |
|
print(i, end=", ") |
|
print() |
|
if attention_mask is not None: |
|
for i in attention_mask[0, :].tolist(): |
|
print(i, end=", ") |
|
print() |
|
if position_ids is not None: |
|
for i in position_ids[0, :].tolist(): |
|
print(i, end=", ") |
|
print() |
|
|
|
outputs = self.deberta( |
|
input_ids, |
|
attention_mask=attention_mask, |
|
token_type_ids=token_type_ids, |
|
position_ids=position_ids, |
|
inputs_embeds=inputs_embeds, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
|
|
sequence_output = outputs[0] |
|
prediction_scores = self.cls(sequence_output) |
|
|
|
masked_lm_loss = None |
|
if labels is not None: |
|
loss_fct = CrossEntropyLoss() |
|
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) |
|
|
|
if not return_dict: |
|
output = (prediction_scores,) + outputs[1:] |
|
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output |
|
|
|
return MaskedLMOutput( |
|
loss=masked_lm_loss, |
|
logits=prediction_scores, |
|
hidden_states=outputs.hidden_states, |
|
attentions=outputs.attentions, |
|
) |
|
|
|
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) |
|
class DebertaV2ForCausalLM(DebertaV2ForMaskedLM): |
|
_tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] |
|
|
|
def __init__(self, config): |
|
super().__init__(config) |
|
config.is_decoder = True |
|
self.mask_token_id = config.mask_token_id |
|
self.sep_token_id = config.sep_token_id |
|
self.n_masks = 3 |
|
|
|
def set_decoder(self, decoder): |
|
self.deberta = decoder |
|
|
|
def get_decoder(self): |
|
return self.deberta |
|
|
|
def can_generate(self): |
|
return True |
|
|
|
def prepare_inputs_for_generation( |
|
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs |
|
): |
|
position_ids = kwargs.get("position_ids", None) |
|
|
|
        # Generation may hand us a prompt that still ends with [SEP]; strip it so the masks appended in `forward`
        # land directly after the prompt tokens.
        if input_ids[0, -1] == self.sep_token_id:
            input_ids = input_ids[:, :-1]
|
if attention_mask is not None: |
|
attention_mask = attention_mask[:, :-1] |
|
if position_ids is not None: |
|
position_ids = position_ids[:, :-1] |
|
|
|
|
|
if past_key_values is not None: |
|
pass |
|
|
|
if attention_mask is not None and position_ids is None: |
|
|
|
position_ids = attention_mask.long().cumsum(-1) - 1 |
|
position_ids.masked_fill_(attention_mask == 0, 1) |
|
if past_key_values: |
|
position_ids = position_ids[:, -input_ids.shape[1] :] |
|
|
|
|
|
if inputs_embeds is not None and past_key_values is None: |
|
model_inputs = {"inputs_embeds": inputs_embeds} |
|
else: |
|
model_inputs = {"input_ids": input_ids} |
|
|
|
model_inputs.update( |
|
{ |
|
"position_ids": position_ids, |
|
"past_key_values": past_key_values, |
|
"use_cache": kwargs.get("use_cache"), |
|
"attention_mask": attention_mask, |
|
} |
|
) |
|
return model_inputs |
|
|
|
def forward( |
|
self, |
|
input_ids: Optional[torch.Tensor] = None, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
token_type_ids: Optional[torch.Tensor] = None, |
|
position_ids: Optional[torch.Tensor] = None, |
|
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
|
inputs_embeds: Optional[torch.Tensor] = None, |
|
labels: Optional[torch.Tensor] = None, |
|
use_cache: Optional[bool] = None, |
|
output_attentions: Optional[bool] = None, |
|
output_hidden_states: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> Union[Tuple, CausalLMOutput]: |
|
|
|
assert labels is None, "only inference is supported for now" |
|
assert inputs_embeds is None, "inputs_embeds is not supported for now" |
|
assert token_type_ids is None, "token_type_ids is not supported for now" |
|
assert past_key_values is None, "past_key_values is not supported for now" |
|
assert use_cache is None, "use_cache is not supported for now" |
|
|
|
assert input_ids[0, -1] != self.sep_token_id, "remove the last token if it is a sep token" |
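
        # Generation is done by mask infilling: append `n_masks` [MASK] tokens and a trailing [SEP] to the prompt,
        # then read the prediction at the first [MASK] position as the next token.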
|
|
|
batch_size, seq_length = input_ids.shape |
|
input_ids = torch.cat( |
|
[ |
|
input_ids, |
|
torch.full((batch_size, self.n_masks), self.mask_token_id, device=input_ids.device), |
|
torch.full((batch_size, 1), self.sep_token_id, device=input_ids.device) |
|
], |
|
dim=-1 |
|
) |
|
|
|
if attention_mask is not None: |
|
attention_mask = torch.cat( |
|
[ |
|
attention_mask, |
|
torch.full((batch_size, self.n_masks + 1), attention_mask[0, -1], device=attention_mask.device), |
|
], |
|
dim=-1 |
|
) |
|
|
|
if position_ids is not None: |
|
position_ids = torch.cat( |
|
[ |
|
position_ids, |
|
torch.arange(0, self.n_masks + 1, device=position_ids.device).unsqueeze(0) + position_ids[:, -1:], |
|
], |
|
dim=-1 |
|
) |
|
|
|
outputs = super().forward( |
|
input_ids, |
|
attention_mask=attention_mask, |
|
token_type_ids=token_type_ids, |
|
position_ids=position_ids, |
|
inputs_embeds=inputs_embeds, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
|
|
|
|
        # `outputs[0]` is the prediction scores whether or not `return_dict` is set (the MLM loss is always None
        # here). Slice so there is one row of logits per prompt token; the last row comes from the first [MASK]
        # position and is the next-token prediction used for generation.
        logits = outputs[0][:, 1 : -self.n_masks, :].contiguous()
|
|
|
loss = None |
|
if labels is not None: |
|
pass |
|
|
|
if not return_dict: |
|
output = (logits,) + outputs[1:] |
|
return (loss,) + output if loss is not None else output |
|
|
|
return CausalLMOutput( |
|
loss=loss, |
|
logits=logits, |
|
hidden_states=outputs.hidden_states, |
|
attentions=outputs.attentions, |
|
) |
|
|
|
|
|
|
|
class DebertaV2PredictionHeadTransform(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.embedding_size = getattr(config, "embedding_size", config.hidden_size) |
|
|
|
self.dense = nn.Linear(config.hidden_size, self.embedding_size) |
|
if isinstance(config.hidden_act, str): |
|
self.transform_act_fn = ACT2FN[config.hidden_act] |
|
else: |
|
self.transform_act_fn = config.hidden_act |
|
self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) |
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.transform_act_fn(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states) |
|
return hidden_states |
|
|
|
|
|
|
|
class DebertaV2LMPredictionHead(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.transform = DebertaV2PredictionHeadTransform(config) |
|
|
|
self.embedding_size = getattr(config, "embedding_size", config.hidden_size) |
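        # The decoder maps hidden states back to the vocabulary; when word embedding tying is enabled, its weight is
        # shared with the input embeddings (see `get_output_embeddings` / `_tied_weights_keys` on the MLM model).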
|
|
|
|
|
self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.transform(hidden_states) |
|
hidden_states = self.decoder(hidden_states) |
|
return hidden_states |
|
|
|
|
|
|
|
class DebertaV2OnlyMLMHead(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.predictions = DebertaV2LMPredictionHead(config) |
|
|
|
def forward(self, sequence_output): |
|
prediction_scores = self.predictions(sequence_output) |
|
return prediction_scores |
|
|
|
|