import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn import Transformer

from helpers import *
from torch import Tensor
from models.PhonemeTransformer import PositionalEncoding, TokenEmbedding


class LipNetPlus(torch.nn.Module):
    """LipNet-style visual front-end (3D convolutions + bidirectional GRUs)
    with an additional Transformer head for sequence-to-sequence decoding.

    The GRU output feeds two heads:

    * ``FC`` -- per-frame logits over ``output_classes + 1`` labels
      (the extra label is typically a CTC blank), see ``forward``.
    * ``transformer`` / ``generator`` -- autoregressive decoding over
      ``output_vocab_size`` target tokens, see ``seq_forward``.
    """

    def __init__(
        self, output_classes, dropout_p=0.0, pre_gru_repeats=0,
        gru_output_size=512, embeds_size=256,
        output_vocab_size=512, dropout_t=0.1,
        src_vocab_size=4, num_encoder_layers: int = 3,
        num_decoder_layers: int = 3, nhead: int = 8,
        dim_feedforward: int = 512,
    ):
        super(LipNetPlus, self).__init__()
        assert gru_output_size % 2 == 0, "bidirectional GRUs need an even output size"
        self.pre_gru_repeats = pre_gru_repeats
        self.gru_out_size = gru_output_size
        self.gru_hidden_size = gru_output_size // 2  # per-direction hidden size
        self.embeds_size = embeds_size

        self.output_vocab_size = output_vocab_size
        self.gru_output_size = gru_output_size
        self.dropout_t = dropout_t

        # 3D convolutional front-end; expects input of shape (B, 3, T, H, W).
        self.conv1 = nn.Conv3d(3, 32, (3, 5, 5), (1, 2, 2), (1, 2, 2))
        self.pool1 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))

        self.conv2 = nn.Conv3d(32, 64, (3, 5, 5), (1, 1, 1), (1, 2, 2))
        self.pool2 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))

        self.conv3 = nn.Conv3d(64, 96, (3, 3, 3), (1, 1, 1), (1, 1, 1))
        self.pool3 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))

        # Two stacked bidirectional GRUs; 96 * 4 * 8 is the flattened
        # per-frame feature size produced by the convolutions for
        # 64 x 128 input frames.
        self.gru1 = nn.GRU(
            96 * 4 * 8, self.gru_hidden_size, 1, bidirectional=True
        )
        self.gru2 = nn.GRU(
            self.gru_output_size, self.gru_hidden_size, 1, bidirectional=True
        )

        self.output_classes = output_classes
        self.FC = nn.Linear(self.gru_output_size, output_classes + 1)
        self.dropout_p = dropout_p

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(self.dropout_p)
        self.dropout3d = nn.Dropout3d(self.dropout_p)

        # Token embeddings and sequence-to-sequence components.
        self.src_tok_emb = TokenEmbedding(
            src_vocab_size, self.embeds_size
        )
        self.tgt_tok_emb = TokenEmbedding(
            output_vocab_size, self.embeds_size
        )

        # Projects GRU features down to the transformer model dimension.
        self.embeds_layer = nn.Linear(
            self.gru_output_size, self.embeds_size
        )
        self.transformer = Transformer(
            d_model=self.embeds_size, nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=dropout_t,
        )
        self.positional_encoding = PositionalEncoding(
            self.embeds_size, dropout=self.dropout_t
        )
        self.generator = nn.Linear(
            self.embeds_size, self.output_vocab_size
        )
        self._init()

    def _init(self):
        init.kaiming_normal_(self.conv1.weight, nonlinearity='relu')
        init.constant_(self.conv1.bias, 0)

        init.kaiming_normal_(self.conv2.weight, nonlinearity='relu')
        init.constant_(self.conv2.bias, 0)

        init.kaiming_normal_(self.conv3.weight, nonlinearity='relu')
        init.constant_(self.conv3.bias, 0)

        init.kaiming_normal_(self.FC.weight, nonlinearity='sigmoid')
        init.constant_(self.FC.bias, 0)

        transformer_components = [
            self.transformer, self.generator,
            self.positional_encoding,
        ]

        for component in transformer_components:
            for p in component.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)

        # Gate-wise GRU initialisation: uniform input-to-hidden weights,
        # orthogonal hidden-to-hidden weights, zero input-to-hidden biases.
        for m in (self.gru1, self.gru2):
            stdv = math.sqrt(2 / (96 * 3 * 6 + 256))
            h = self.gru_hidden_size

            # One block of rows per GRU gate (reset, update, new).
            for i in range(0, 3 * h, h):
                init.uniform_(m.weight_ih_l0[i: i + h],
                              -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0[i: i + h])
                init.constant_(m.bias_ih_l0[i: i + h], 0)
                init.uniform_(m.weight_ih_l0_reverse[i: i + h],
                              -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0_reverse[i: i + h])
                init.constant_(m.bias_ih_l0_reverse[i: i + h], 0)

    def forward_gru(self, x):
        # x: (B, 3, T, H, W)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.dropout3d(x)
        x = self.pool1(x)

        x = self.conv2(x)
        x = self.relu(x)
        x = self.dropout3d(x)
        x = self.pool2(x)

        x = self.conv3(x)
        x = self.relu(x)
        x = self.dropout3d(x)
        x = self.pool3(x)

        # (B, C, T, H, W) -> (T, B, C, H, W), then flatten the spatial
        # dimensions so each time step is a single feature vector.
        x = x.permute(2, 0, 1, 3, 4).contiguous()
        x = x.view(x.size(0), x.size(1), -1)

        self.gru1.flatten_parameters()
        self.gru2.flatten_parameters()

        # Optionally upsample the time axis before the recurrent layers.
        if self.pre_gru_repeats > 1:
            x = torch.repeat_interleave(
                x, dim=0, repeats=self.pre_gru_repeats
            )

        x, h = self.gru1(x)
        x = self.dropout(x)
        x, h = self.gru2(x)
        x = self.dropout(x)
        return x  # (T, B, gru_output_size)

    def predict_from_gru_out(self, x):
        x = self.FC(x)
        # (T, B, output_classes + 1) -> (B, T, output_classes + 1)
        x = x.permute(1, 0, 2).contiguous()
        return x

    def forward(self, x):
        x = self.forward_gru(x)
        x = self.predict_from_gru_out(x)
        return x

    def make_src_embeds(self, x):
        # Project GRU features to transformer source embeddings.
        x = self.embeds_layer(x)
        x = self.relu(x)
        return x

    def seq_forward(
        self, src_embeds: Tensor, trg: Tensor,
        src_mask: Tensor, tgt_mask: Tensor, src_padding_mask: Tensor,
        tgt_padding_mask: Tensor, memory_key_padding_mask: Tensor
    ):
        # Encoder input: positionally-encoded GRU feature projections;
        # decoder input: embedded target tokens.
        src_emb = self.positional_encoding(src_embeds)
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))

        outs = self.transformer(
            src_emb, tgt_emb, src_mask, tgt_mask, None,
            src_padding_mask, tgt_padding_mask, memory_key_padding_mask
        )
        return self.generator(outs)
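

if __name__ == "__main__":
    # Minimal shape-check sketch, not part of the model. It assumes
    # 64 x 128 frames (implied by the 96 * 4 * 8 GRU input size), a
    # placeholder 27-class output set, and that TokenEmbedding /
    # PositionalEncoding follow the usual (T, B) / (T, B, E) layout.
    model = LipNetPlus(output_classes=27)
    video = torch.randn(2, 3, 16, 64, 128)  # (B, C, T, H, W)

    # CTC-style head: per-frame logits, batch first.
    ctc_logits = model(video)
    print(ctc_logits.shape)  # expected: (2, 16, 28)

    # Transformer head: encode GRU features, decode a dummy target prefix.
    gru_out = model.forward_gru(video)           # (T, B, gru_output_size)
    src_embeds = model.make_src_embeds(gru_out)  # (T, B, embeds_size)
    trg = torch.randint(0, model.output_vocab_size, (5, 2))  # (T_tgt, B)
    tgt_mask = model.transformer.generate_square_subsequent_mask(trg.size(0))
    seq_logits = model.seq_forward(
        src_embeds, trg,
        src_mask=None, tgt_mask=tgt_mask,
        src_padding_mask=None, tgt_padding_mask=None,
        memory_key_padding_mask=None,
    )
    print(seq_logits.shape)  # expected: (5, 2, output_vocab_size)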