"""
Code modified from the DETR transformer:

https://github.com/facebookresearch/detr

Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""

import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor


class TransformerDecoder(nn.Module):
    """Stack of decoder layers that also collects the per-layer cross-attention weights."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # memory is expected to be (T, B, C): source length, batch, embedding dim.
        output = tgt
        intermediate = []
        atten_layers = []
        for layer in self.layers:
            residual = True
            output, ws = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
                residual=residual,
            )
            atten_layers.append(ws)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            # Keep the return signature consistent with the non-intermediate path.
            return torch.stack(intermediate), atten_layers
        return output, atten_layers


class TransformerDecoderLayer(nn.Module):
    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        residual=True,
    ):
        # Self-attention over the target queries.
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2, ws = self.self_attn(
            q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )
        # Residual connection around self-attention, gated by the `residual` flag.
        if residual:
            tgt = tgt + self.dropout1(tgt2)
        else:
            tgt = tgt2
        tgt = self.norm1(tgt)
        # Cross-attention over the encoder memory.
        tgt2, ws = self.multihead_attn(
            query=self.with_pos_embed(tgt, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Feed-forward block.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt, ws

    def forward_pre(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2, ws = self.self_attn(
            q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2, attn_weights = self.multihead_attn(
            query=self.with_pos_embed(tgt2, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, attn_weights

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        residual=True,
    ):
        if self.normalize_before:
            return self.forward_pre(
                tgt,
                memory,
                tgt_mask,
                memory_mask,
                tgt_key_padding_mask,
                memory_key_padding_mask,
                pos,
                query_pos,
            )
        return self.forward_post(
            tgt,
            memory,
            tgt_mask,
            memory_mask,
            tgt_key_padding_mask,
            memory_key_padding_mask,
            pos,
            query_pos,
            residual,
        )


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


def _get_activation_fn(activation):
    """Return an activation function given a string."""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
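

# Minimal usage sketch: the layer sizes, query count, and sequence lengths below
# are illustrative assumptions, not values taken from the original repository.
# It builds a small two-layer decoder and runs one forward pass, which returns
# the decoded queries together with the per-layer cross-attention weights.
if __name__ == "__main__":
    d_model, nhead, num_layers = 256, 8, 2
    layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward=1024, dropout=0.1)
    decoder = TransformerDecoder(layer, num_layers, norm=nn.LayerNorm(d_model))

    num_queries, batch_size, memory_len = 10, 4, 50
    tgt = torch.zeros(num_queries, batch_size, d_model)        # decoder queries
    memory = torch.rand(memory_len, batch_size, d_model)       # encoder output (T, B, C)
    query_pos = torch.rand(num_queries, batch_size, d_model)   # query position embeddings
    pos = torch.rand(memory_len, batch_size, d_model)          # memory position embeddings

    out, attn = decoder(tgt, memory, pos=pos, query_pos=query_pos)
    print(out.shape)                 # torch.Size([10, 4, 256])
    print(len(attn), attn[0].shape)  # one cross-attention weight tensor per layer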