""" | |
This file provides fine stage LETR definition | |
""" | |
import io
import copy
from collections import defaultdict
from typing import List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image

from .misc import NestedTensor, nested_tensor_from_tensor_list


class LETRstack(nn.Module):
    def __init__(self, letr, args):
        super().__init__()
        self.letr = letr
        self.backbone = self.letr.backbone

        if args.layer1_frozen:
            # freeze the coarse stage: backbone, encoder, decoder
            for n, p in self.named_parameters():
                p.requires_grad_(False)

        hidden_dim, nheads = letr.transformer.d_model, letr.transformer.nhead

        # new input projection layer for the fine-stage feature map
        channel = [256, 512, 1024, 2048]
        self.input_proj = nn.Conv2d(channel[args.layer2_num], hidden_dim, kernel_size=1)

        # new transformer encoder-decoder for the fine stage
        self.transformer = Transformer(
            d_model=args.second_hidden_dim,
            dropout=args.second_dropout,
            nhead=args.second_nheads,
            dim_feedforward=args.second_dim_feedforward,
            num_encoder_layers=args.second_enc_layers,
            num_decoder_layers=args.second_dec_layers,
            normalize_before=args.second_pre_norm,
            return_intermediate_dec=True,
        )

        # output heads: binary line classification and 4-d endpoint regression
        self.class_embed = nn.Linear(hidden_dim, 1 + 1)
        self.lines_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        self.aux_loss = args.aux_loss
        self.args = args

    def forward(self, samples, postprocessors=None, targets=None, criterion=None):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)

        # backbone
        features, pos = self.letr.backbone(samples)

        # layer 1 (coarse stage) features
        l1_num = self.args.layer1_num
        src1, mask1 = features[l1_num].decompose()
        assert mask1 is not None

        # layer 1 (coarse stage) transformer
        hs1, _ = self.letr.transformer(self.letr.input_proj(src1), mask1, self.letr.query_embed.weight, pos[l1_num])

        # layer 2 (fine stage) features
        l2_num = self.args.layer2_num
        src2, mask2 = features[l2_num].decompose()
        src2 = self.input_proj(src2)

        # layer 2 (fine stage) transformer, seeded with the coarse-stage decoder output
        hs2, memory, _ = self.transformer(src2, mask2, hs1[-1], pos[l2_num])

        outputs_class = self.class_embed(hs2)
        outputs_coord = self.lines_embed(hs2).sigmoid()

        out = {}
        out["pred_logits"] = outputs_class[-1]
        out["pred_lines"] = outputs_coord[-1]
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
        return out, None

    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_lines': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

    def _set_aux_loss_POST(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'POST_pred_lines': b} for b in outputs_coord[:-1]]


def _expand(tensor, length: int):
    return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)


class MLP(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
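

# The sketch below is illustrative and not part of the original module: it shows how the
# fine-stage line head, built as MLP(hidden_dim, hidden_dim, 4, 3) in LETRstack.__init__,
# maps decoder outputs to normalized endpoint coordinates. The tensor sizes used here
# (batch 2, 1000 queries, d_model 256) are assumptions for demonstration only.
def _mlp_head_shape_sketch():
    head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    hs = torch.randn(6, 2, 1000, 256)   # (decoder layers, batch, queries, d_model)
    lines = head(hs).sigmoid()          # (6, 2, 1000, 4): line endpoints in [0, 1]
    return lines.shape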


class Transformer(nn.Module):

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        mask = mask.flatten(1)
        query_embed = query_embed.permute(1, 0, 2)

        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs, attn_output_weights = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        return hs.transpose(1, 2), memory, attn_output_weights
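

# Illustrative shape walk-through (an assumption-laden sketch, not part of the original code):
# this fine-stage Transformer receives the coarse-stage decoder output hs1[-1] as `query_embed`
# with shape (batch, queries, d_model), which forward() permutes to (queries, batch, d_model).
# All sizes below are placeholders chosen only for demonstration.
def _transformer_shape_sketch():
    t = Transformer(d_model=256, nhead=8, num_encoder_layers=2, num_decoder_layers=2,
                    return_intermediate_dec=True)
    src = torch.randn(2, 256, 16, 16)                # (N, C, H, W) fine-stage feature map
    mask = torch.zeros(2, 16, 16, dtype=torch.bool)  # no padded pixels
    query_embed = torch.randn(2, 1000, 256)          # coarse-stage decoder output (N, Q, C)
    pos_embed = torch.randn(2, 256, 16, 16)          # positional encoding, same shape as src
    hs, memory, attn = t(src, mask, query_embed, pos_embed)
    # hs: (num_decoder_layers, N, Q, C); memory: (H*W, N, C); attn: one (N, Q, H*W) map per layer
    return hs.shape, memory.shape, len(attn)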


class TransformerEncoder(nn.Module):

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask, pos=pos)
        if self.norm is not None:
            output = self.norm(output)
        return output


class TransformerDecoder(nn.Module):

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        output = tgt

        intermediate = []
        attn_output_weights_list = []

        for layer in self.layers:
            output, attn_output_weights = layer(output, memory, tgt_mask=tgt_mask,
                                                memory_mask=memory_mask,
                                                tgt_key_padding_mask=tgt_key_padding_mask,
                                                memory_key_padding_mask=memory_key_padding_mask,
                                                pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
                attn_output_weights_list.append(attn_output_weights)

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            return torch.stack(intermediate), attn_output_weights_list

        return output.unsqueeze(0), attn_output_weights
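

# Note (illustrative, not original code): with return_intermediate=True the decoder stacks
# one normalized output per layer along dim 0, so the final prediction is hs[-1]; the matching
# cross-attention maps, one (N, num_queries, H*W) tensor per layer as returned by
# nn.MultiheadAttention (averaged over heads), are returned alongside in the same order.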


class TransformerEncoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(src, pos)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class TransformerDecoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2, attn_output_weights = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                                        key=self.with_pos_embed(memory, pos),
                                                        value=memory, attn_mask=memory_mask,
                                                        key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt, attn_output_weights

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        # also return the cross-attention weights so the pre-norm path matches
        # forward_post's interface (TransformerDecoder unpacks two values per layer)
        tgt2, attn_output_weights = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                                        key=self.with_pos_embed(memory, pos),
                                                        value=memory, attn_mask=memory_mask,
                                                        key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, attn_output_weights

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
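

# Usage sketch (illustrative only; the argument values are placeholders, not the official
# LETR config): LETRstack wraps a pretrained coarse-stage LETR model and reads the `args`
# attributes used in LETRstack.__init__/forward above.
#
#   from argparse import Namespace
#   _args = Namespace(
#       layer1_frozen=True, layer1_num=3, layer2_num=2,
#       second_hidden_dim=256, second_dropout=0.1, second_nheads=8,
#       second_dim_feedforward=1024, second_enc_layers=6, second_dec_layers=6,
#       second_pre_norm=False, aux_loss=True,
#   )
#   model = LETRstack(coarse_letr, _args)   # coarse_letr: a trained coarse-stage LETR model
#   out, _ = model(images)                  # images: list of 3xHxW tensors or a NestedTensor
#   out["pred_logits"], out["pred_lines"]   # per-query class logits and endpoint coordinates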