"""Conformer encoder definition.""" |
|
|
|
from typing import Optional |
|
from typing import Tuple |
|
|
|
import logging |
|
import torch |
|
|
|
from typeguard import check_argument_types |
|
|
|
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule |
|
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer |
|
from espnet.nets.pytorch_backend.nets_utils import get_activation |
|
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask |
|
from espnet.nets.pytorch_backend.transformer.attention import ( |
|
MultiHeadedAttention, |
|
RelPositionMultiHeadedAttention, |
|
LegacyRelPositionMultiHeadedAttention, |
|
) |
|
from espnet.nets.pytorch_backend.transformer.embedding import ( |
|
PositionalEncoding, |
|
ScaledPositionalEncoding, |
|
RelPositionalEncoding, |
|
LegacyRelPositionalEncoding, |
|
) |
|
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm |
|
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import Conv1dLinear |
|
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import MultiLayeredConv1d |
|
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import ( |
|
PositionwiseFeedForward, |
|
) |
|
from espnet.nets.pytorch_backend.transformer.repeat import repeat |
|
from espnet.nets.pytorch_backend.transformer.subsampling import check_short_utt |
|
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling |
|
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling6 |
|
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling8 |
|
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError |
|
from espnet2.asr.encoder.abs_encoder import AbsEncoder |
|
|
|
|
|
class ConformerEncoder(AbsEncoder): |
|
"""Conformer encoder module. |
|
|
|
Args: |
|
input_size (int): Input dimension. |
|
output_size (int): Dimention of attention. |
|
attention_heads (int): The number of heads of multi head attention. |
|
linear_units (int): The number of units of position-wise feed forward. |
|
num_blocks (int): The number of decoder blocks. |
|
dropout_rate (float): Dropout rate. |
|
attention_dropout_rate (float): Dropout rate in attention. |
|
positional_dropout_rate (float): Dropout rate after adding positional encoding. |
|
input_layer (Union[str, torch.nn.Module]): Input layer type. |
|
normalize_before (bool): Whether to use layer_norm before the first block. |
|
concat_after (bool): Whether to concat attention layer's input and output. |
|
If True, additional linear will be applied. |
|
i.e. x -> x + linear(concat(x, att(x))) |
|
If False, no additional linear will be applied. i.e. x -> x + att(x) |
|
positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". |
|
positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. |
|
rel_pos_type (str): Whether to use the latest relative positional encoding or |
|
the legacy one. The legacy relative positional encoding will be deprecated |
|
in the future. More Details can be found in |
|
https://github.com/espnet/espnet/pull/2816. |
|
encoder_pos_enc_layer_type (str): Encoder positional encoding layer type. |
|
encoder_attn_layer_type (str): Encoder attention layer type. |
|
activation_type (str): Encoder activation function type. |
|
macaron_style (bool): Whether to use macaron style for positionwise layer. |
|
use_cnn_module (bool): Whether to use convolution module. |
|
zero_triu (bool): Whether to zero the upper triangular part of attention matrix. |
|
cnn_module_kernel (int): Kernerl size of convolution module. |
|
padding_idx (int): Padding idx for input_layer=embed. |
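    Example:
        A minimal usage sketch; the 80-dimensional input features, the batch
        size, and the lengths below are arbitrary illustrative values:

        >>> encoder = ConformerEncoder(input_size=80)
        >>> xs_pad = torch.randn(2, 100, 80)  # (batch, time, input_size)
        >>> ilens = torch.tensor([100, 80])
        >>> xs, olens, _ = encoder(xs_pad, ilens)

        Here xs has shape (batch, subsampled time, output_size); the default
        "conv2d" input layer reduces the time axis by roughly a factor of 4,
        and olens holds the corresponding subsampled lengths.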
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 3,
        macaron_style: bool = False,
        rel_pos_type: str = "legacy",
        pos_enc_layer_type: str = "rel_pos",
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        zero_triu: bool = False,
        cnn_module_kernel: int = 31,
        padding_idx: int = -1,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
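        # Resolve which relative positional encoding / self-attention variant
        # to use: "legacy" maps the generic "rel_pos"/"rel_selfattn" settings
        # to their legacy implementations, while "latest" forbids the legacy
        # types (see the rel_pos_type note in the class docstring).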
        if rel_pos_type == "legacy":
            if pos_enc_layer_type == "rel_pos":
                pos_enc_layer_type = "legacy_rel_pos"
            if selfattention_layer_type == "rel_selfattn":
                selfattention_layer_type = "legacy_rel_selfattn"
        elif rel_pos_type == "latest":
            assert selfattention_layer_type != "legacy_rel_selfattn"
            assert pos_enc_layer_type != "legacy_rel_pos"
        else:
            raise ValueError("unknown rel_pos_type: " + rel_pos_type)

        activation = get_activation(activation_type)
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
            logging.warning(
                "Using legacy_rel_pos and it will be deprecated in the future."
            )
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before

        # Position-wise feed-forward module selection.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError(
                "Support only linear, conv1d, or conv1d-linear."
            )
        # Self-attention implementation matching the positional encoding type.
        if selfattention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
        elif selfattention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
            logging.warning(
                "Using legacy_rel_selfattn and it will be deprecated in the future."
            )
        elif selfattention_layer_type == "rel_selfattn":
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
                zero_triu,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type)

        convolution_layer = ConvolutionModule
        convolution_layer_args = (output_size, cnn_module_kernel, activation)
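        # Each EncoderLayer below combines an optional macaron-style
        # feed-forward module, the self-attention module, an optional
        # convolution module, and a feed-forward module; residual connections
        # and layer normalization are handled inside EncoderLayer.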
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            xs_pad (torch.Tensor): Input tensor (#batch, L, input_size).
            ilens (torch.Tensor): Input length (#batch).
            prev_states (torch.Tensor): Not used for now.

        Returns:
            torch.Tensor: Output tensor (#batch, L, output_size).
            torch.Tensor: Output length (#batch).
            torch.Tensor: Not used for now.
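        Note:
            When a convolutional input layer ("conv2d", "conv2d6", "conv2d8")
            is used, the returned lengths are the subsampled lengths, i.e.
            roughly 1/4, 1/6, or 1/8 of ilens, respectively.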
        """
        # Mask of valid (non-padded) frames with shape (#batch, 1, L);
        # make_pad_mask marks padded frames, so it is inverted here.
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)

        if isinstance(
            self.embed, (Conv2dSubsampling, Conv2dSubsampling6, Conv2dSubsampling8)
        ):
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)
        xs_pad, masks = self.encoders(xs_pad, masks)
        if isinstance(xs_pad, tuple):
            xs_pad = xs_pad[0]
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)

        # Output lengths are recovered from the (possibly subsampled) masks.
        olens = masks.squeeze(1).sum(1)
        return xs_pad, olens, None