|
import math
from collections import defaultdict

import torch
import torch.nn.functional as F
import torch.onnx.operators
from torch import nn
from torch.nn import Parameter
|
|
|
|
|
def make_positions(tensor, padding_idx): |
|
"""Replace non-padding symbols with their position numbers. |
|
|
|
Position numbers begin at padding_idx+1. Padding symbols are ignored. |
|
""" |
|
|
|
|
|
|
|
|
|
mask = tensor.ne(padding_idx).int() |
|
return ( |
|
torch.cumsum(mask, dim=1).type_as(mask) * mask |
|
).long() + padding_idx |
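
# A minimal usage sketch (editorial illustration; the token ids below are hypothetical):
#
#     >>> t = torch.tensor([[1, 5, 6, 1],
#     ...                   [5, 6, 7, 8]])
#     >>> make_positions(t, padding_idx=1)
#     tensor([[1, 2, 3, 1],
#             [2, 3, 4, 5]])
#
# Real tokens are numbered from padding_idx + 1 onwards; padding positions keep the value
# padding_idx itself.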
|
|
|
|
|
def softmax(x, dim):
    # always compute the softmax in float32 for numerical stability (e.g. under fp16 training)
    return F.softmax(x, dim=dim, dtype=torch.float32)
|
|
|
|
|
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) |
|
|
|
def _get_full_incremental_state_key(module_instance, key): |
|
module_name = module_instance.__class__.__name__ |
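    # Assign each module instance a per-class unique id the first time it is seen, so that
    # incremental state is never shared between different instances of the same class.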
|
|
|
|
|
|
|
if not hasattr(module_instance, '_instance_id'): |
|
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 |
|
module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name] |
|
|
|
return '{}.{}.{}'.format(module_name, module_instance._instance_id, key) |
|
|
|
|
|
|
|
def get_incremental_state(module, incremental_state, key): |
|
"""Helper for getting incremental state for an nn.Module.""" |
|
full_key = _get_full_incremental_state_key(module, key) |
|
if incremental_state is None or full_key not in incremental_state: |
|
return None |
|
return incremental_state[full_key] |
|
|
|
|
|
def set_incremental_state(module, incremental_state, key, value): |
|
"""Helper for setting incremental state for an nn.Module.""" |
|
if incremental_state is not None: |
|
full_key = _get_full_incremental_state_key(module, key) |
|
incremental_state[full_key] = value |
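
# A minimal usage sketch of the incremental-state helpers (editorial illustration; `layer`
# and the cached tensor `k` are hypothetical):
#
#     incremental_state = {}
#     set_incremental_state(layer, incremental_state, 'attn_state', {'prev_key': k})
#     cached = get_incremental_state(layer, incremental_state, 'attn_state')
#
# Keys are namespaced as '<ClassName>.<instance_id>.<key>', so several layers of the same
# class can safely share a single incremental_state dict.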
|
|
|
|
|
|
|
class Reshape(nn.Module): |
|
def __init__(self, *args): |
|
super(Reshape, self).__init__() |
|
self.shape = args |
|
|
|
def forward(self, x): |
|
return x.view(self.shape) |
|
|
|
|
|
class Permute(nn.Module): |
|
def __init__(self, *args): |
|
super(Permute, self).__init__() |
|
self.args = args |
|
|
|
def forward(self, x): |
|
return x.permute(self.args) |
|
|
|
|
|
class LinearNorm(torch.nn.Module): |
|
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): |
|
super(LinearNorm, self).__init__() |
|
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) |
|
|
|
torch.nn.init.xavier_uniform_( |
|
self.linear_layer.weight, |
|
gain=torch.nn.init.calculate_gain(w_init_gain)) |
|
|
|
def forward(self, x): |
|
return self.linear_layer(x) |
|
|
|
|
|
class ConvNorm(torch.nn.Module): |
|
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, |
|
padding=None, dilation=1, bias=True, w_init_gain='linear'): |
|
super(ConvNorm, self).__init__() |
|
if padding is None: |
|
assert (kernel_size % 2 == 1) |
|
padding = int(dilation * (kernel_size - 1) / 2) |
|
|
|
self.conv = torch.nn.Conv1d(in_channels, out_channels, |
|
kernel_size=kernel_size, stride=stride, |
|
padding=padding, dilation=dilation, |
|
bias=bias) |
|
|
|
torch.nn.init.xavier_uniform_( |
|
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) |
|
|
|
def forward(self, signal): |
|
conv_signal = self.conv(signal) |
|
return conv_signal |
|
|
|
|
|
def Embedding(num_embeddings, embedding_dim, padding_idx=None): |
|
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) |
|
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) |
|
if padding_idx is not None: |
|
nn.init.constant_(m.weight[padding_idx], 0) |
|
return m |
|
|
|
|
|
class GroupNorm1DTBC(nn.GroupNorm): |
|
    def forward(self, input):
        # input: [T, B, C] -> [B, C, T] for GroupNorm, then back to [T, B, C]
        return super(GroupNorm1DTBC, self).forward(input.permute(1, 2, 0)).permute(2, 0, 1)
|
|
|
|
|
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): |
|
if not export and torch.cuda.is_available(): |
|
try: |
|
from apex.normalization import FusedLayerNorm |
|
return FusedLayerNorm(normalized_shape, eps, elementwise_affine) |
|
except ImportError: |
|
pass |
|
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) |
|
|
|
|
|
def Linear(in_features, out_features, bias=True): |
|
m = nn.Linear(in_features, out_features, bias) |
|
nn.init.xavier_uniform_(m.weight) |
|
if bias: |
|
nn.init.constant_(m.bias, 0.) |
|
return m |
|
|
|
|
|
class SinusoidalPositionalEmbedding(nn.Module): |
|
"""This module produces sinusoidal positional embeddings of any length. |
|
|
|
Padding symbols are ignored. |
|
""" |
|
|
|
def __init__(self, embedding_dim, padding_idx, init_size=1024): |
|
super().__init__() |
|
self.embedding_dim = embedding_dim |
|
self.padding_idx = padding_idx |
|
self.weights = SinusoidalPositionalEmbedding.get_embedding( |
|
init_size, |
|
embedding_dim, |
|
padding_idx, |
|
) |
|
self.register_buffer('_float_tensor', torch.FloatTensor(1)) |
|
|
|
@staticmethod |
|
def get_embedding(num_embeddings, embedding_dim, padding_idx=None): |
|
"""Build sinusoidal embeddings. |
|
|
|
This matches the implementation in tensor2tensor, but differs slightly |
|
from the description in Section 3.5 of "Attention Is All You Need". |
|
""" |
|
half_dim = embedding_dim // 2 |
|
emb = math.log(10000) / (half_dim - 1) |
|
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) |
|
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) |
|
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) |
|
if embedding_dim % 2 == 1: |
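            # zero-pad the last column so the table has exactly embedding_dim channels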
|
|
|
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) |
|
if padding_idx is not None: |
|
emb[padding_idx, :] = 0 |
|
return emb |
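
    # Shape sketch (editorial illustration): get_embedding(N, D) returns an [N, D] table whose
    # row p is [sin(p * w_0), ..., sin(p * w_{D/2-1}), cos(p * w_0), ..., cos(p * w_{D/2-1})]
    # with w_i = 10000 ** (-i / (D/2 - 1)); the padding_idx row (if given) is zeroed out, and
    # an odd D gets one extra zero column.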
|
|
|
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs): |
|
"""Input is expected to be of size [bsz x seqlen].""" |
|
bsz, seq_len = input.shape[:2] |
|
max_pos = self.padding_idx + 1 + seq_len |
|
if self.weights is None or max_pos > self.weights.size(0): |
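            # (re)build the embedding table whenever the requested length exceeds the cache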
|
|
|
self.weights = SinusoidalPositionalEmbedding.get_embedding( |
|
max_pos, |
|
self.embedding_dim, |
|
self.padding_idx, |
|
) |
|
self.weights = self.weights.to(self._float_tensor) |
|
|
|
if incremental_state is not None: |
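            # during incremental decoding all tokens of the current step share one position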
|
|
|
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len |
|
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) |
|
|
|
positions = make_positions(input, self.padding_idx) if positions is None else positions |
|
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() |
|
|
|
def max_positions(self): |
|
"""Maximum number of supported positions.""" |
|
return int(1e5) |
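
# A minimal usage sketch (editorial illustration; the sizes below are hypothetical):
#
#     pos_emb = SinusoidalPositionalEmbedding(embedding_dim=256, padding_idx=0, init_size=1024)
#     tokens = torch.randint(1, 100, (8, 50))   # [B, T] token ids, 0 = padding
#     pos = pos_emb(tokens)                     # [B, T, 256], detached from the graph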
|
|
|
|
|
class ConvTBC(nn.Module): |
|
def __init__(self, in_channels, out_channels, kernel_size, padding=0): |
|
super(ConvTBC, self).__init__() |
|
self.in_channels = in_channels |
|
self.out_channels = out_channels |
|
self.kernel_size = kernel_size |
|
self.padding = padding |
|
|
|
self.weight = torch.nn.Parameter(torch.Tensor( |
|
self.kernel_size, in_channels, out_channels)) |
|
self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) |
|
|
|
def forward(self, input): |
|
return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding) |
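
# Note (editorial): torch.conv_tbc expects a (Time, Batch, Channel) input, i.e. x of shape
# [T, B, in_channels] with weight [kernel_size, in_channels, out_channels]; this avoids the
# transposes that a regular nn.Conv1d layout of [B, C, T] would require.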
|
|
|
|
|
class MultiheadAttention(nn.Module): |
|
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, |
|
add_bias_kv=False, add_zero_attn=False, self_attention=False, |
|
encoder_decoder_attention=False): |
|
super().__init__() |
|
self.embed_dim = embed_dim |
|
self.kdim = kdim if kdim is not None else embed_dim |
|
self.vdim = vdim if vdim is not None else embed_dim |
|
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim |
|
|
|
self.num_heads = num_heads |
|
self.dropout = dropout |
|
self.head_dim = embed_dim // num_heads |
|
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" |
|
self.scaling = self.head_dim ** -0.5 |
|
|
|
self.self_attention = self_attention |
|
self.encoder_decoder_attention = encoder_decoder_attention |
|
|
|
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \ |
|
'value to be of the same size' |
|
|
|
if self.qkv_same_dim: |
|
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) |
|
else: |
|
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) |
|
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) |
|
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) |
|
|
|
if bias: |
|
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) |
|
else: |
|
self.register_parameter('in_proj_bias', None) |
|
|
|
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
|
if add_bias_kv: |
|
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) |
|
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) |
|
else: |
|
self.bias_k = self.bias_v = None |
|
|
|
self.add_zero_attn = add_zero_attn |
|
|
|
self.reset_parameters() |
|
|
|
        # use the fused PyTorch kernel when available (note: that fast path returns the
        # averaged attention weights directly rather than the (weights, logits) tuple)
        self.enable_torch_version = hasattr(F, "multi_head_attention_forward")
|
self.last_attn_probs = None |
|
|
|
def reset_parameters(self): |
|
if self.qkv_same_dim: |
|
nn.init.xavier_uniform_(self.in_proj_weight) |
|
else: |
|
nn.init.xavier_uniform_(self.k_proj_weight) |
|
nn.init.xavier_uniform_(self.v_proj_weight) |
|
nn.init.xavier_uniform_(self.q_proj_weight) |
|
|
|
nn.init.xavier_uniform_(self.out_proj.weight) |
|
if self.in_proj_bias is not None: |
|
nn.init.constant_(self.in_proj_bias, 0.) |
|
nn.init.constant_(self.out_proj.bias, 0.) |
|
if self.bias_k is not None: |
|
nn.init.xavier_normal_(self.bias_k) |
|
if self.bias_v is not None: |
|
nn.init.xavier_normal_(self.bias_v) |
|
|
|
def forward( |
|
self, |
|
query, key, value, |
|
key_padding_mask=None, |
|
incremental_state=None, |
|
need_weights=True, |
|
static_kv=False, |
|
attn_mask=None, |
|
before_softmax=False, |
|
need_head_weights=False, |
|
enc_dec_attn_constraint_mask=None, |
|
reset_attn_weight=None |
|
): |
|
"""Input shape: Time x Batch x Channel |
|
|
|
Args: |
|
key_padding_mask (ByteTensor, optional): mask to exclude |
|
keys that are pads, of shape `(batch, src_len)`, where |
|
padding elements are indicated by 1s. |
|
need_weights (bool, optional): return the attention weights, |
|
                averaged over heads (default: True).
|
attn_mask (ByteTensor, optional): typically used to |
|
implement causal attention, where the mask prevents the |
|
attention from looking forward in time (default: None). |
|
before_softmax (bool, optional): return the raw attention |
|
weights and values before the attention softmax. |
|
need_head_weights (bool, optional): return the attention |
|
weights for each head. Implies *need_weights*. Default: |
|
return the average attention weights over all heads. |
|
""" |
|
if need_head_weights: |
|
need_weights = True |
|
|
|
tgt_len, bsz, embed_dim = query.size() |
|
assert embed_dim == self.embed_dim |
|
assert list(query.size()) == [tgt_len, bsz, embed_dim] |
|
if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None: |
|
if self.qkv_same_dim: |
|
return F.multi_head_attention_forward(query, key, value, |
|
self.embed_dim, self.num_heads, |
|
self.in_proj_weight, |
|
self.in_proj_bias, self.bias_k, self.bias_v, |
|
self.add_zero_attn, self.dropout, |
|
self.out_proj.weight, self.out_proj.bias, |
|
self.training, key_padding_mask, need_weights, |
|
attn_mask) |
|
else: |
|
return F.multi_head_attention_forward(query, key, value, |
|
self.embed_dim, self.num_heads, |
|
torch.empty([0]), |
|
self.in_proj_bias, self.bias_k, self.bias_v, |
|
self.add_zero_attn, self.dropout, |
|
self.out_proj.weight, self.out_proj.bias, |
|
self.training, key_padding_mask, need_weights, |
|
attn_mask, use_separate_proj_weight=True, |
|
q_proj_weight=self.q_proj_weight, |
|
k_proj_weight=self.k_proj_weight, |
|
v_proj_weight=self.v_proj_weight) |
|
|
|
if incremental_state is not None: |
|
saved_state = self._get_input_buffer(incremental_state) |
|
if 'prev_key' in saved_state: |
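                # previous steps are already cached; with a static key/value (encoder-decoder
                # attention) the projections need not be recomputed, so drop key/value here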
|
|
|
|
|
if static_kv: |
|
assert self.encoder_decoder_attention and not self.self_attention |
|
key = value = None |
|
else: |
|
saved_state = None |
|
|
|
if self.self_attention: |
|
|
|
q, k, v = self.in_proj_qkv(query) |
|
elif self.encoder_decoder_attention: |
|
|
|
q = self.in_proj_q(query) |
|
if key is None: |
|
assert value is None |
|
k = v = None |
|
else: |
|
k = self.in_proj_k(key) |
|
v = self.in_proj_v(key) |
|
|
|
else: |
|
q = self.in_proj_q(query) |
|
k = self.in_proj_k(key) |
|
v = self.in_proj_v(value) |
|
q *= self.scaling |
|
|
|
if self.bias_k is not None: |
|
assert self.bias_v is not None |
|
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) |
|
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) |
|
if attn_mask is not None: |
|
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) |
|
if key_padding_mask is not None: |
|
key_padding_mask = torch.cat( |
|
[key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1) |
|
|
|
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
|
if k is not None: |
|
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
|
if v is not None: |
|
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
|
|
|
if saved_state is not None: |
|
|
|
if 'prev_key' in saved_state: |
|
prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim) |
|
if static_kv: |
|
k = prev_key |
|
else: |
|
k = torch.cat((prev_key, k), dim=1) |
|
if 'prev_value' in saved_state: |
|
prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim) |
|
if static_kv: |
|
v = prev_value |
|
else: |
|
v = torch.cat((prev_value, v), dim=1) |
|
if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None: |
|
prev_key_padding_mask = saved_state['prev_key_padding_mask'] |
|
if static_kv: |
|
key_padding_mask = prev_key_padding_mask |
|
else: |
|
key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1) |
|
|
|
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim) |
|
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim) |
|
saved_state['prev_key_padding_mask'] = key_padding_mask |
|
|
|
self._set_input_buffer(incremental_state, saved_state) |
|
|
|
src_len = k.size(1) |
|
|
|
|
|
|
|
if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]): |
|
key_padding_mask = None |
|
|
|
if key_padding_mask is not None: |
|
assert key_padding_mask.size(0) == bsz |
|
assert key_padding_mask.size(1) == src_len |
|
|
|
if self.add_zero_attn: |
|
src_len += 1 |
|
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) |
|
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) |
|
if attn_mask is not None: |
|
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) |
|
if key_padding_mask is not None: |
|
key_padding_mask = torch.cat( |
|
[key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1) |
|
|
|
attn_weights = torch.bmm(q, k.transpose(1, 2)) |
|
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) |
|
|
|
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] |
|
|
|
if attn_mask is not None: |
|
if len(attn_mask.shape) == 2: |
|
attn_mask = attn_mask.unsqueeze(0) |
|
elif len(attn_mask.shape) == 3: |
|
attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape( |
|
bsz * self.num_heads, tgt_len, src_len) |
|
attn_weights = attn_weights + attn_mask |
|
|
|
if enc_dec_attn_constraint_mask is not None: |
|
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
|
attn_weights = attn_weights.masked_fill( |
|
enc_dec_attn_constraint_mask.unsqueeze(2).bool(), |
|
-1e8, |
|
) |
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
|
if key_padding_mask is not None: |
|
|
|
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
|
attn_weights = attn_weights.masked_fill( |
|
key_padding_mask.unsqueeze(1).unsqueeze(2), |
|
-1e8, |
|
) |
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
|
attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
|
|
|
if before_softmax: |
|
return attn_weights, v |
|
|
|
attn_weights_float = softmax(attn_weights, dim=-1) |
|
attn_weights = attn_weights_float.type_as(attn_weights) |
|
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) |
|
|
|
if reset_attn_weight is not None: |
|
if reset_attn_weight: |
|
self.last_attn_probs = attn_probs.detach() |
|
else: |
|
assert self.last_attn_probs is not None |
|
attn_probs = self.last_attn_probs |
|
attn = torch.bmm(attn_probs, v) |
|
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] |
|
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) |
|
attn = self.out_proj(attn) |
|
|
|
if need_weights: |
|
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) |
|
if not need_head_weights: |
|
|
|
attn_weights = attn_weights.mean(dim=0) |
|
else: |
|
attn_weights = None |
|
|
|
return attn, (attn_weights, attn_logits) |
|
|
|
def in_proj_qkv(self, query): |
|
return self._in_proj(query).chunk(3, dim=-1) |
|
|
|
def in_proj_q(self, query): |
|
if self.qkv_same_dim: |
|
return self._in_proj(query, end=self.embed_dim) |
|
else: |
|
bias = self.in_proj_bias |
|
if bias is not None: |
|
bias = bias[:self.embed_dim] |
|
return F.linear(query, self.q_proj_weight, bias) |
|
|
|
def in_proj_k(self, key): |
|
if self.qkv_same_dim: |
|
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) |
|
else: |
|
weight = self.k_proj_weight |
|
bias = self.in_proj_bias |
|
if bias is not None: |
|
bias = bias[self.embed_dim:2 * self.embed_dim] |
|
return F.linear(key, weight, bias) |
|
|
|
def in_proj_v(self, value): |
|
if self.qkv_same_dim: |
|
return self._in_proj(value, start=2 * self.embed_dim) |
|
else: |
|
weight = self.v_proj_weight |
|
bias = self.in_proj_bias |
|
if bias is not None: |
|
bias = bias[2 * self.embed_dim:] |
|
return F.linear(value, weight, bias) |
|
|
|
def _in_proj(self, input, start=0, end=None): |
|
weight = self.in_proj_weight |
|
bias = self.in_proj_bias |
|
weight = weight[start:end, :] |
|
if bias is not None: |
|
bias = bias[start:end] |
|
return F.linear(input, weight, bias) |
|
|
|
def _get_input_buffer(self, incremental_state): |
|
return get_incremental_state( |
|
self, |
|
incremental_state, |
|
'attn_state', |
|
) or {} |
|
|
|
def _set_input_buffer(self, incremental_state, buffer): |
|
set_incremental_state( |
|
self, |
|
incremental_state, |
|
'attn_state', |
|
buffer, |
|
) |
|
|
|
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): |
|
return attn_weights |
|
|
|
def clear_buffer(self, incremental_state=None): |
|
if incremental_state is not None: |
|
saved_state = self._get_input_buffer(incremental_state) |
|
if 'prev_key' in saved_state: |
|
del saved_state['prev_key'] |
|
if 'prev_value' in saved_state: |
|
del saved_state['prev_value'] |
|
self._set_input_buffer(incremental_state, saved_state) |
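
# A minimal usage sketch (editorial illustration; all sizes are hypothetical):
#
#     attn = MultiheadAttention(embed_dim=256, num_heads=4, self_attention=True)
#     x = torch.randn(50, 8, 256)                # [T, B, C]
#     out, _ = attn(query=x, key=x, value=x)     # out: [T, B, 256]
#
# Inputs follow the (Time, Batch, Channel) convention; for incremental decoding, pass a shared
# incremental_state dict so cached keys/values are reused across decoding steps.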
|
|
|
|
|
class Swish(torch.autograd.Function): |
|
@staticmethod |
|
def forward(ctx, i): |
|
result = i * torch.sigmoid(i) |
|
ctx.save_for_backward(i) |
|
return result |
|
|
|
@staticmethod |
|
def backward(ctx, grad_output): |
|
        i = ctx.saved_tensors[0]  # ctx.saved_variables is deprecated; use saved_tensors
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
|
|
|
|
|
class CustomSwish(nn.Module): |
|
def forward(self, input_tensor): |
|
return Swish.apply(input_tensor) |
|
|
|
|
|
class TransformerFFNLayer(nn.Module): |
|
def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'): |
|
super().__init__() |
|
self.kernel_size = kernel_size |
|
self.dropout = dropout |
|
self.act = act |
|
if padding == 'SAME': |
|
self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2) |
|
elif padding == 'LEFT': |
|
self.ffn_1 = nn.Sequential( |
|
nn.ConstantPad1d((kernel_size - 1, 0), 0.0), |
|
nn.Conv1d(hidden_size, filter_size, kernel_size) |
|
) |
|
self.ffn_2 = Linear(filter_size, hidden_size) |
|
if self.act == 'swish': |
|
self.swish_fn = CustomSwish() |
|
|
|
def forward(self, x, incremental_state=None): |
|
|
|
if incremental_state is not None: |
|
saved_state = self._get_input_buffer(incremental_state) |
|
if 'prev_input' in saved_state: |
|
prev_input = saved_state['prev_input'] |
|
x = torch.cat((prev_input, x), dim=0) |
|
x = x[-self.kernel_size:] |
|
saved_state['prev_input'] = x |
|
self._set_input_buffer(incremental_state, saved_state) |
|
|
|
x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1) |
|
x = x * self.kernel_size ** -0.5 |
|
|
|
if incremental_state is not None: |
|
x = x[-1:] |
|
if self.act == 'gelu': |
|
x = F.gelu(x) |
|
if self.act == 'relu': |
|
x = F.relu(x) |
|
if self.act == 'swish': |
|
x = self.swish_fn(x) |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = self.ffn_2(x) |
|
return x |
|
|
|
def _get_input_buffer(self, incremental_state): |
|
return get_incremental_state( |
|
self, |
|
incremental_state, |
|
'f', |
|
) or {} |
|
|
|
def _set_input_buffer(self, incremental_state, buffer): |
|
set_incremental_state( |
|
self, |
|
incremental_state, |
|
'f', |
|
buffer, |
|
) |
|
|
|
def clear_buffer(self, incremental_state): |
|
if incremental_state is not None: |
|
saved_state = self._get_input_buffer(incremental_state) |
|
if 'prev_input' in saved_state: |
|
del saved_state['prev_input'] |
|
self._set_input_buffer(incremental_state, saved_state) |
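
# Note (editorial): with padding='LEFT' the convolution is causal, and during incremental
# decoding the layer keeps only the last kernel_size input frames in its buffer, so each new
# step costs a single convolution window instead of reprocessing the whole sequence.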
|
|
|
|
|
class BatchNorm1dTBC(nn.Module): |
|
def __init__(self, c): |
|
super(BatchNorm1dTBC, self).__init__() |
|
self.bn = nn.BatchNorm1d(c) |
|
|
|
def forward(self, x): |
|
""" |
|
|
|
:param x: [T, B, C] |
|
:return: [T, B, C] |
|
""" |
|
x = x.permute(1, 2, 0) |
|
x = self.bn(x) |
|
x = x.permute(2, 0, 1) |
|
return x |
|
|
|
|
|
class EncSALayer(nn.Module): |
|
def __init__(self, c, num_heads, dropout, attention_dropout=0.1, |
|
relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'): |
|
super().__init__() |
|
self.c = c |
|
self.dropout = dropout |
|
self.num_heads = num_heads |
|
if num_heads > 0: |
|
if norm == 'ln': |
|
self.layer_norm1 = LayerNorm(c) |
|
elif norm == 'bn': |
|
self.layer_norm1 = BatchNorm1dTBC(c) |
|
elif norm == 'gn': |
|
self.layer_norm1 = GroupNorm1DTBC(8, c) |
|
self.self_attn = MultiheadAttention( |
|
self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False) |
|
if norm == 'ln': |
|
self.layer_norm2 = LayerNorm(c) |
|
elif norm == 'bn': |
|
self.layer_norm2 = BatchNorm1dTBC(c) |
|
elif norm == 'gn': |
|
self.layer_norm2 = GroupNorm1DTBC(8, c) |
|
self.ffn = TransformerFFNLayer( |
|
c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act) |
|
|
|
def forward(self, x, encoder_padding_mask=None, **kwargs): |
|
layer_norm_training = kwargs.get('layer_norm_training', None) |
|
if layer_norm_training is not None: |
|
self.layer_norm1.training = layer_norm_training |
|
self.layer_norm2.training = layer_norm_training |
|
if self.num_heads > 0: |
|
residual = x |
|
x = self.layer_norm1(x) |
|
            x, _ = self.self_attn(
|
query=x, |
|
key=x, |
|
value=x, |
|
key_padding_mask=encoder_padding_mask |
|
) |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
            if encoder_padding_mask is not None:
                x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
|
|
|
residual = x |
|
x = self.layer_norm2(x) |
|
x = self.ffn(x) |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
        if encoder_padding_mask is not None:
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
|
return x |
|
|
|
|
|
class DecSALayer(nn.Module): |
|
def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, |
|
kernel_size=9, act='gelu', norm='ln'): |
|
super().__init__() |
|
self.c = c |
|
self.dropout = dropout |
|
if norm == 'ln': |
|
self.layer_norm1 = LayerNorm(c) |
|
elif norm == 'gn': |
|
self.layer_norm1 = GroupNorm1DTBC(8, c) |
|
self.self_attn = MultiheadAttention( |
|
c, num_heads, self_attention=True, dropout=attention_dropout, bias=False |
|
) |
|
if norm == 'ln': |
|
self.layer_norm2 = LayerNorm(c) |
|
elif norm == 'gn': |
|
self.layer_norm2 = GroupNorm1DTBC(8, c) |
|
self.encoder_attn = MultiheadAttention( |
|
c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False, |
|
) |
|
if norm == 'ln': |
|
self.layer_norm3 = LayerNorm(c) |
|
elif norm == 'gn': |
|
self.layer_norm3 = GroupNorm1DTBC(8, c) |
|
self.ffn = TransformerFFNLayer( |
|
c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act) |
|
|
|
def forward( |
|
self, |
|
x, |
|
encoder_out=None, |
|
encoder_padding_mask=None, |
|
incremental_state=None, |
|
self_attn_mask=None, |
|
self_attn_padding_mask=None, |
|
attn_out=None, |
|
reset_attn_weight=None, |
|
**kwargs, |
|
): |
|
layer_norm_training = kwargs.get('layer_norm_training', None) |
|
if layer_norm_training is not None: |
|
self.layer_norm1.training = layer_norm_training |
|
self.layer_norm2.training = layer_norm_training |
|
self.layer_norm3.training = layer_norm_training |
|
residual = x |
|
x = self.layer_norm1(x) |
|
x, _ = self.self_attn( |
|
query=x, |
|
key=x, |
|
value=x, |
|
key_padding_mask=self_attn_padding_mask, |
|
incremental_state=incremental_state, |
|
attn_mask=self_attn_mask |
|
) |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
|
|
attn_logits = None |
|
if encoder_out is not None or attn_out is not None: |
|
residual = x |
|
x = self.layer_norm2(x) |
|
if encoder_out is not None: |
|
x, attn = self.encoder_attn( |
|
query=x, |
|
key=encoder_out, |
|
value=encoder_out, |
|
key_padding_mask=encoder_padding_mask, |
|
incremental_state=incremental_state, |
|
static_kv=True, |
|
enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state, |
|
'enc_dec_attn_constraint_mask'), |
|
reset_attn_weight=reset_attn_weight |
|
) |
|
attn_logits = attn[1] |
|
elif attn_out is not None: |
|
x = self.encoder_attn.in_proj_v(attn_out) |
|
if encoder_out is not None or attn_out is not None: |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
|
|
residual = x |
|
x = self.layer_norm3(x) |
|
x = self.ffn(x, incremental_state=incremental_state) |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
return x, attn_logits |
|
|
|
def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None): |
|
self.encoder_attn.clear_buffer(incremental_state) |
|
self.ffn.clear_buffer(incremental_state) |
|
|
|
def set_buffer(self, name, tensor, incremental_state): |
|
return set_incremental_state(self, incremental_state, name, tensor) |
|
|
|
|
|
class ConvBlock(nn.Module): |
|
def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0): |
|
super().__init__() |
|
self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride) |
|
        self.norm_type = norm
        self.norm = None
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(n_chans)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(n_chans, affine=True)
        elif norm == 'gn':
            self.norm = nn.GroupNorm(n_chans // 16, n_chans)
        elif norm == 'ln':
            # LayerNorm normalizes over channels; forward() transposes to [B, T, C] first
            self.norm = LayerNorm(n_chans)
        elif norm == 'wn':
            self.conv = torch.nn.utils.weight_norm(self.conv.conv)
|
self.dropout = nn.Dropout(dropout) |
|
self.relu = nn.ReLU() |
|
|
|
def forward(self, x): |
|
""" |
|
|
|
:param x: [B, C, T] |
|
:return: [B, C, T] |
|
""" |
|
x = self.conv(x) |
|
        if self.norm_type == 'ln':
            # LayerNorm expects channels last: [B, C, T] -> [B, T, C] -> norm -> [B, C, T]
            x = self.norm(x.transpose(1, 2)).transpose(1, 2)
        elif self.norm is not None:
            x = self.norm(x)
|
x = self.relu(x) |
|
x = self.dropout(x) |
|
return x |
|
|
|
|
|
class ConvStacks(nn.Module): |
|
def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn', |
|
dropout=0, strides=None, res=True): |
|
super().__init__() |
|
self.conv = torch.nn.ModuleList() |
|
self.kernel_size = kernel_size |
|
self.res = res |
|
self.in_proj = Linear(idim, n_chans) |
|
if strides is None: |
|
strides = [1] * n_layers |
|
else: |
|
assert len(strides) == n_layers |
|
for idx in range(n_layers): |
|
self.conv.append(ConvBlock( |
|
n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout)) |
|
self.out_proj = Linear(n_chans, odim) |
|
|
|
def forward(self, x, return_hiddens=False): |
|
""" |
|
|
|
:param x: [B, T, H] |
|
:return: [B, T, H] |
|
""" |
|
x = self.in_proj(x) |
|
x = x.transpose(1, -1) |
|
hiddens = [] |
|
for f in self.conv: |
|
x_ = f(x) |
|
x = x + x_ if self.res else x_ |
|
hiddens.append(x) |
|
x = x.transpose(1, -1) |
|
x = self.out_proj(x) |
|
if return_hiddens: |
|
hiddens = torch.stack(hiddens, 1) |
|
return x, hiddens |
|
return x |
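
    # Shape sketch (editorial illustration): for x of shape [B, T, idim] and the default
    # strides of 1, forward() returns [B, T, odim]; with return_hiddens=True it also returns
    # the stacked per-layer features of shape [B, n_layers, n_chans, T].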
|
|
|
|
|
class ConvGlobalStacks(nn.Module): |
|
def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn', dropout=0, |
|
strides=[2, 2, 2, 2, 2]): |
|
super().__init__() |
|
self.conv = torch.nn.ModuleList() |
|
self.pooling = torch.nn.ModuleList() |
|
self.kernel_size = kernel_size |
|
self.in_proj = Linear(idim, n_chans) |
|
for idx in range(n_layers): |
|
self.conv.append(ConvBlock(n_chans, n_chans, kernel_size, stride=strides[idx], |
|
norm=norm, dropout=dropout)) |
|
self.pooling.append(nn.MaxPool1d(strides[idx])) |
|
self.out_proj = Linear(n_chans, odim) |
|
|
|
def forward(self, x): |
|
""" |
|
|
|
:param x: [B, T, H] |
|
:return: [B, T, H] |
|
""" |
|
x = self.in_proj(x) |
|
x = x.transpose(1, -1) |
|
for f, p in zip(self.conv, self.pooling): |
|
x = f(x) |
|
x = x.transpose(1, -1) |
|
x = self.out_proj(x.mean(1)) |
|
return x |
|
|
|
|
|
class ConvDecoder(nn.Module): |
|
def __init__(self, c, dropout, kernel_size=9, act='gelu'): |
|
super().__init__() |
|
self.c = c |
|
self.dropout = dropout |
|
|
|
self.pre_convs = nn.ModuleList() |
|
self.pre_lns = nn.ModuleList() |
|
for i in range(2): |
|
self.pre_convs.append(TransformerFFNLayer( |
|
c, c * 2, padding='LEFT', kernel_size=kernel_size, dropout=dropout, act=act)) |
|
self.pre_lns.append(LayerNorm(c)) |
|
|
|
self.layer_norm_attn = LayerNorm(c) |
|
self.encoder_attn = MultiheadAttention(c, 1, encoder_decoder_attention=True, bias=False) |
|
|
|
self.post_convs = nn.ModuleList() |
|
self.post_lns = nn.ModuleList() |
|
for i in range(8): |
|
self.post_convs.append(TransformerFFNLayer( |
|
c, c * 2, padding='LEFT', kernel_size=kernel_size, dropout=dropout, act=act)) |
|
self.post_lns.append(LayerNorm(c)) |
|
|
|
def forward( |
|
self, |
|
x, |
|
encoder_out=None, |
|
encoder_padding_mask=None, |
|
incremental_state=None, |
|
**kwargs, |
|
): |
|
attn_logits = None |
|
for conv, ln in zip(self.pre_convs, self.pre_lns): |
|
residual = x |
|
x = ln(x) |
|
x = conv(x) + residual |
|
if encoder_out is not None: |
|
residual = x |
|
x = self.layer_norm_attn(x) |
|
x, attn = self.encoder_attn( |
|
query=x, |
|
key=encoder_out, |
|
value=encoder_out, |
|
key_padding_mask=encoder_padding_mask, |
|
incremental_state=incremental_state, |
|
static_kv=True, |
|
enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state, |
|
'enc_dec_attn_constraint_mask'), |
|
) |
|
attn_logits = attn[1] |
|
x = F.dropout(x, self.dropout, training=self.training) |
|
x = residual + x |
|
for conv, ln in zip(self.post_convs, self.post_lns): |
|
residual = x |
|
x = ln(x) |
|
x = conv(x) + residual |
|
return x, attn_logits |
|
|
|
def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None): |
|
self.encoder_attn.clear_buffer(incremental_state) |
|
        # ConvDecoder has no single self.ffn; clear each causal conv FFN layer's buffer instead
        for conv in list(self.pre_convs) + list(self.post_convs):
            conv.clear_buffer(incremental_state)
|
|
|
def set_buffer(self, name, tensor, incremental_state): |
|
return set_incremental_state(self, incremental_state, name, tensor) |
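

if __name__ == '__main__':
    # Editorial smoke test (illustration only, not part of the original module): run a tiny
    # encoder layer and a decoder layer over random [T, B, C] inputs and print the shapes.
    # Assumes CPU execution without apex; with apex + CUDA, move the tensors to the GPU first.
    T, B, C = 16, 2, 64
    x = torch.randn(T, B, C)
    padding_mask = torch.zeros(B, T).bool()

    enc_layer = EncSALayer(C, num_heads=2, dropout=0.1, kernel_size=9, norm='ln', act='gelu')
    enc_out = enc_layer(x, encoder_padding_mask=padding_mask)
    print('EncSALayer out:', enc_out.shape)   # torch.Size([16, 2, 64])

    dec_layer = DecSALayer(C, num_heads=2, dropout=0.1, kernel_size=9, norm='ln', act='gelu')
    dec_out, attn_logits = dec_layer(x, encoder_out=enc_out, encoder_padding_mask=padding_mask)
    print('DecSALayer out:', dec_out.shape)   # torch.Size([16, 2, 64])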
|
|