|
|
|
|
|
|
|
"""Encodec SEANet-based encoder and decoder implementation.""" |
|
|
|
import numpy as np |
|
|
|
"""LSTM layers module.""" |
|
|
|
from torch import nn |
|
|
|
|
|
class SLSTM(nn.Module): |
|
""" |
|
LSTM without worrying about the hidden state, nor the layout of the data. |
|
Expects input as convolutional layout. |
|
""" |
|
|
|
def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True): |
|
super().__init__() |
|
self.skip = skip |
|
self.lstm = nn.LSTM(dimension, dimension, num_layers) |
|
|
|
def forward(self, x): |
|
x = x.permute(2, 0, 1) |
|
y, _ = self.lstm(x) |
|
if self.skip: |
|
y = y + x |
|
y = y.permute(1, 2, 0) |
|
return y |
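
# Illustrative sketch, not part of the original module: SLSTM takes and returns
# the convolutional (B, C, T) layout, so it can sit directly between conv layers.
# The helper name `_example_slstm` and the shapes are arbitrary choices.
def _example_slstm():
    import torch  # also imported at module level further down; repeated for self-containment
    lstm = SLSTM(dimension=16, num_layers=2)
    x = torch.randn(4, 16, 50)  # (batch, channels, time)
    y = lstm(x)
    assert y.shape == x.shape  # layout and shape are preserved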
|
|
|
|
|
"""Convolutional layers wrappers and utilities.""" |
|
|
|
import math |
|
import warnings |
|
|
|
from torch.nn import functional as F |
|
from torch.nn.utils import spectral_norm, weight_norm |
|
|
|
"""Normalization modules.""" |
|
|
|
import typing as tp |
|
|
|
import einops |
|
import torch |
|
|
|
|
|
|
class ConvLayerNorm(nn.LayerNorm): |
|
""" |
|
Convolution-friendly LayerNorm that moves channels to last dimensions |
|
before running the normalization and moves them back to original position right after. |
|
""" |
|
|
|
def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs): |
|
super().__init__(normalized_shape, **kwargs) |
|
|
|
def forward(self, x): |
|
x = einops.rearrange(x, 'b ... t -> b t ...') |
|
x = super().forward(x) |
|
x = einops.rearrange(x, 'b t ... -> b ... t') |
|
        return x
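
# Minimal sketch, not part of the original file: ConvLayerNorm moves channels to
# the last position, applies LayerNorm over them, and restores (B, C, T). The
# helper name and shapes below are arbitrary.
def _example_conv_layer_norm():
    ln = ConvLayerNorm(normalized_shape=8)
    x = torch.randn(2, 8, 20)  # (B, C, T)
    y = ln(x)
    assert y.shape == x.shape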
|
|
|
|
|
CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm', |
|
'time_layer_norm', 'layer_norm', 'time_group_norm']) |
|
|
|
|
|
def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module: |
|
assert norm in CONV_NORMALIZATIONS |
|
if norm == 'weight_norm': |
|
return weight_norm(module) |
|
elif norm == 'spectral_norm': |
|
return spectral_norm(module) |
|
    else:
        # `norm` was already checked against CONV_NORMALIZATIONS above; any
        # remaining choice requires no parametrization on the module itself.
        return module
|
|
|
|
|
def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module: |
|
"""Return the proper normalization module. If causal is True, this will ensure the returned |
|
module is causal, or return an error if the normalization doesn't support causal evaluation. |
|
""" |
|
assert norm in CONV_NORMALIZATIONS |
|
if norm == 'layer_norm': |
|
assert isinstance(module, nn.modules.conv._ConvNd) |
|
return ConvLayerNorm(module.out_channels, **norm_kwargs) |
|
elif norm == 'time_group_norm': |
|
if causal: |
|
raise ValueError("GroupNorm doesn't support causal evaluation.") |
|
assert isinstance(module, nn.modules.conv._ConvNd) |
|
return nn.GroupNorm(1, module.out_channels, **norm_kwargs) |
|
else: |
|
return nn.Identity() |
|
|
|
|
|
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, |
|
padding_total: int = 0) -> int: |
|
"""See `pad_for_conv1d`. |
|
""" |
|
length = x.shape[-1] |
|
n_frames = (length - kernel_size + padding_total) / stride + 1 |
|
ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) |
|
return ideal_length - length |
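
# Worked example, illustrative and not from the original file: with length=5,
# kernel_size=4, stride=2 and padding_total=4, n_frames = (5 - 4 + 4) / 2 + 1 = 3.5,
# ideal_length = (ceil(3.5) - 1) * 2 + (4 - 4) = 6, so one extra sample of
# padding is needed at the end.
def _example_extra_padding():
    x = torch.randn(1, 1, 5)
    assert get_extra_padding_for_conv1d(x, kernel_size=4, stride=2, padding_total=4) == 1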
|
|
|
|
|
def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0): |
|
"""Pad for a convolution to make sure that the last window is full. |
|
Extra padding is added at the end. This is required to ensure that we can rebuild |
|
an output of the same length, as otherwise, even with padding, some time steps |
|
might get removed. |
|
For instance, with total padding = 4, kernel size = 4, stride = 2: |
|
0 0 1 2 3 4 5 0 0 # (0s are padding) |
|
1 2 3 # (output frames of a convolution, last 0 is never used) |
|
0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) |
|
1 2 3 4 # once you removed padding, we are missing one time step ! |
|
""" |
|
extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) |
|
return F.pad(x, (0, extra_padding)) |
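
# Sketch of the docstring example above in code (illustrative, not from the
# original file): the length-5 signal with padding_total=4, kernel_size=4,
# stride=2 receives one extra zero at the end so the last window is full.
def _example_pad_for_conv1d():
    x = torch.arange(1., 6.).view(1, 1, 5)  # the "1 2 3 4 5" signal from the docstring
    padded = pad_for_conv1d(x, kernel_size=4, stride=2, padding_total=4)
    assert padded.shape[-1] == 6  # one time step of zero padding appended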
|
|
|
|
|
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.): |
|
"""Tiny wrapper around F.pad, just to allow for reflect padding on small input. |
|
If this is the case, we insert extra 0 padding to the right before the reflection happen. |
|
""" |
|
length = x.shape[-1] |
|
padding_left, padding_right = paddings |
|
assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) |
|
if mode == 'reflect': |
|
max_pad = max(padding_left, padding_right) |
|
extra_pad = 0 |
|
if length <= max_pad: |
|
extra_pad = max_pad - length + 1 |
|
x = F.pad(x, (0, extra_pad)) |
|
padded = F.pad(x, paddings, mode, value) |
|
end = padded.shape[-1] - extra_pad |
|
return padded[..., :end] |
|
else: |
|
return F.pad(x, paddings, mode, value) |
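
# Illustrative check, not from the original file: plain reflect padding fails when
# the pad size reaches the input length; pad1d zero-extends first so it succeeds.
def _example_pad1d_reflect():
    x = torch.randn(1, 1, 2)  # shorter than the requested padding
    y = pad1d(x, (3, 3), mode='reflect')
    assert y.shape[-1] == 2 + 3 + 3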
|
|
|
|
|
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]): |
|
"""Remove padding from x, handling properly zero padding. Only for 1d!""" |
|
padding_left, padding_right = paddings |
|
assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) |
|
assert (padding_left + padding_right) <= x.shape[-1] |
|
end = x.shape[-1] - padding_right |
|
return x[..., padding_left: end] |
|
|
|
|
|
class NormConv1d(nn.Module): |
|
"""Wrapper around Conv1d and normalization applied to this conv |
|
to provide a uniform interface across normalization approaches. |
|
""" |
|
|
|
def __init__(self, *args, causal: bool = False, norm: str = 'none', |
|
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): |
|
super().__init__() |
|
self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm) |
|
self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs) |
|
self.norm_type = norm |
|
|
|
def forward(self, x): |
|
x = self.conv(x) |
|
x = self.norm(x) |
|
return x |
|
|
|
|
|
class NormConv2d(nn.Module): |
|
"""Wrapper around Conv2d and normalization applied to this conv |
|
to provide a uniform interface across normalization approaches. |
|
""" |
|
|
|
def __init__(self, *args, norm: str = 'none', |
|
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): |
|
super().__init__() |
|
self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm) |
|
self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs) |
|
self.norm_type = norm |
|
|
|
def forward(self, x): |
|
x = self.conv(x) |
|
x = self.norm(x) |
|
return x |
|
|
|
|
|
class NormConvTranspose1d(nn.Module): |
|
"""Wrapper around ConvTranspose1d and normalization applied to this conv |
|
to provide a uniform interface across normalization approaches. |
|
""" |
|
|
|
def __init__(self, *args, causal: bool = False, norm: str = 'none', |
|
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): |
|
super().__init__() |
|
self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm) |
|
self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs) |
|
self.norm_type = norm |
|
|
|
def forward(self, x): |
|
x = self.convtr(x) |
|
x = self.norm(x) |
|
return x |
|
|
|
|
|
class NormConvTranspose2d(nn.Module): |
|
"""Wrapper around ConvTranspose2d and normalization applied to this conv |
|
to provide a uniform interface across normalization approaches. |
|
""" |
|
|
|
def __init__(self, *args, norm: str = 'none', |
|
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): |
|
super().__init__() |
|
self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm) |
|
self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs) |
|
|
|
def forward(self, x): |
|
x = self.convtr(x) |
|
x = self.norm(x) |
|
return x |
|
|
|
|
|
class SConv1d(nn.Module): |
|
"""Conv1d with some builtin handling of asymmetric or causal padding |
|
and normalization. |
|
""" |
|
|
|
def __init__(self, in_channels: int, out_channels: int, |
|
kernel_size: int, stride: int = 1, dilation: int = 1, |
|
groups: int = 1, bias: bool = True, causal: bool = False, |
|
norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, |
|
pad_mode: str = 'reflect'): |
|
super().__init__() |
|
|
|
if stride > 1 and dilation > 1: |
|
warnings.warn('SConv1d has been initialized with stride > 1 and dilation > 1' |
|
                          f' (kernel_size={kernel_size}, stride={stride}, dilation={dilation}).')
|
self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride, |
|
dilation=dilation, groups=groups, bias=bias, causal=causal, |
|
norm=norm, norm_kwargs=norm_kwargs) |
|
self.causal = causal |
|
self.pad_mode = pad_mode |
|
|
|
def forward(self, x): |
|
B, C, T = x.shape |
|
kernel_size = self.conv.conv.kernel_size[0] |
|
stride = self.conv.conv.stride[0] |
|
dilation = self.conv.conv.dilation[0] |
|
padding_total = (kernel_size - 1) * dilation - (stride - 1) |
|
extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) |
|
        if self.causal:
            # Left padding for causal
            x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
|
        else:
            # Asymmetric padding required for odd strides
            padding_right = padding_total // 2
|
padding_left = padding_total - padding_right |
|
x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode) |
|
return self.conv(x) |
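
# Shape sketch, illustrative and not from the original file: with stride 1 the
# built-in padding keeps the time dimension intact, in both causal and
# non-causal modes.
def _example_sconv1d():
    conv = SConv1d(1, 8, kernel_size=7, causal=True)
    x = torch.randn(2, 1, 100)
    assert conv(x).shape == (2, 8, 100)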
|
|
|
|
|
class SConvTranspose1d(nn.Module): |
|
"""ConvTranspose1d with some builtin handling of asymmetric or causal padding |
|
and normalization. |
|
""" |
|
|
|
def __init__(self, in_channels: int, out_channels: int, |
|
kernel_size: int, stride: int = 1, causal: bool = False, |
|
norm: str = 'none', trim_right_ratio: float = 1., |
|
norm_kwargs: tp.Dict[str, tp.Any] = {}): |
|
super().__init__() |
|
self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride, |
|
causal=causal, norm=norm, norm_kwargs=norm_kwargs) |
|
self.causal = causal |
|
self.trim_right_ratio = trim_right_ratio |
|
assert self.causal or self.trim_right_ratio == 1., \ |
|
"`trim_right_ratio` != 1.0 only makes sense for causal convolutions" |
|
assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1. |
|
|
|
def forward(self, x): |
|
kernel_size = self.convtr.convtr.kernel_size[0] |
|
stride = self.convtr.convtr.stride[0] |
|
padding_total = kernel_size - stride |
|
|
|
        y = self.convtr(x)

        # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
        # removed at the very end, when keeping only the right length for the output,
        # as removing it here would require also passing the length at the matching layer
        # in the encoder.
        if self.causal:
            # Trim the padding on the right according to the specified ratio;
            # if trim_right_ratio = 1.0, trim everything from the right.
            padding_right = math.ceil(padding_total * self.trim_right_ratio)
|
padding_left = padding_total - padding_right |
|
y = unpad1d(y, (padding_left, padding_right)) |
|
        else:
            # Asymmetric padding required for odd strides
            padding_right = padding_total // 2
|
padding_left = padding_total - padding_right |
|
y = unpad1d(y, (padding_left, padding_right)) |
|
return y |
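
# Shape sketch, illustrative and not from the original file: with
# kernel_size = 2 * stride, the trimming above yields an exact `stride`-fold
# upsampling of the time dimension.
def _example_sconvtranspose1d():
    convtr = SConvTranspose1d(8, 4, kernel_size=8, stride=4, causal=True)
    x = torch.randn(2, 8, 25)
    assert convtr(x).shape == (2, 4, 100)  # 25 frames * stride 4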
|
|
|
|
|
class SEANetResnetBlock(nn.Module): |
|
"""Residual block from SEANet model. |
|
Args: |
|
dim (int): Dimension of the input/output |
|
kernel_sizes (list): List of kernel sizes for the convolutions. |
|
dilations (list): List of dilations for the convolutions. |
|
activation (str): Activation function. |
|
activation_params (dict): Parameters to provide to the activation function |
|
norm (str): Normalization method. |
|
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. |
|
causal (bool): Whether to use fully causal convolution. |
|
pad_mode (str): Padding mode for the convolutions. |
|
compress (int): Reduced dimensionality in residual branches (from Demucs v3) |
|
true_skip (bool): Whether to use true skip connection or a simple convolution as the skip connection. |
|
""" |
|
|
|
def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1], |
|
activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, |
|
norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False, |
|
pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True): |
|
super().__init__() |
|
assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations' |
|
act = getattr(nn, activation) |
|
hidden = dim // compress |
|
block = [] |
|
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): |
|
in_chs = dim if i == 0 else hidden |
|
out_chs = dim if i == len(kernel_sizes) - 1 else hidden |
|
block += [ |
|
act(**activation_params), |
|
SConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation, |
|
norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode), |
|
] |
|
self.block = nn.Sequential(*block) |
|
self.shortcut: nn.Module |
|
if true_skip: |
|
self.shortcut = nn.Identity() |
|
else: |
|
self.shortcut = SConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode) |
|
|
|
def forward(self, x): |
|
return self.shortcut(x) + self.block(x) |
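
# Shape sketch, illustrative and not from the original file: the residual block
# preserves both dimension and length, squeezing through a `dim // compress`
# bottleneck internally.
def _example_resnet_block():
    block = SEANetResnetBlock(dim=32)
    x = torch.randn(2, 32, 60)
    assert block(x).shape == x.shape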
|
|
|
|
|
class SEANetEncoder(nn.Module): |
|
"""SEANet encoder. |
|
Args: |
|
channels (int): Audio channels. |
|
dimension (int): Intermediate representation dimension. |
|
n_filters (int): Base width for the model. |
|
        n_residual_layers (int): Number of residual layers.
        ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of
            upsampling ratios, hence it will use the ratios in the reverse order of the ones specified here,
            which must match the decoder order.
|
activation (str): Activation function. |
|
activation_params (dict): Parameters to provide to the activation function |
|
norm (str): Normalization method. |
|
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. |
|
kernel_size (int): Kernel size for the initial convolution. |
|
        last_kernel_size (int): Kernel size for the last convolution.
|
residual_kernel_size (int): Kernel size for the residual layers. |
|
dilation_base (int): How much to increase the dilation with each layer. |
|
causal (bool): Whether to use fully causal convolution. |
|
pad_mode (str): Padding mode for the convolutions. |
|
true_skip (bool): Whether to use true skip connection or a simple |
|
(streamable) convolution as the skip connection in the residual network blocks. |
|
compress (int): Reduced dimensionality in residual branches (from Demucs v3). |
|
lstm (int): Number of LSTM layers at the end of the encoder. |
|
""" |
|
|
|
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 1, |
|
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, |
|
norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, |
|
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, |
|
pad_mode: str = 'reflect', true_skip: bool = False, compress: int = 2, lstm: int = 2): |
|
super().__init__() |
|
self.channels = channels |
|
self.dimension = dimension |
|
self.n_filters = n_filters |
|
self.ratios = list(reversed(ratios)) |
|
del ratios |
|
self.n_residual_layers = n_residual_layers |
|
self.hop_length = np.prod(self.ratios) |
|
|
|
act = getattr(nn, activation) |
|
mult = 1 |
|
model: tp.List[nn.Module] = [ |
|
SConv1d(channels, mult * n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode) |
|
] |
|
|
|
        # Downsample to raw audio scale
        for i, ratio in enumerate(self.ratios):
            # Add residual layers
            for j in range(n_residual_layers):
|
model += [ |
|
SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1], |
|
dilations=[dilation_base ** j, 1], |
|
norm=norm, norm_params=norm_params, |
|
activation=activation, activation_params=activation_params, |
|
causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)] |
|
|
|
|
|
            # Add downsampling layers
            model += [
|
act(**activation_params), |
|
SConv1d(mult * n_filters, mult * n_filters * 2, |
|
kernel_size=ratio * 2, stride=ratio, |
|
norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode), |
|
] |
|
mult *= 2 |
|
|
|
if lstm: |
|
model += [SLSTM(mult * n_filters, num_layers=lstm)] |
|
|
|
model += [ |
|
act(**activation_params), |
|
SConv1d(mult * n_filters, dimension, last_kernel_size, norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode) |
|
] |
|
|
|
self.model = nn.Sequential(*model) |
|
|
|
def forward(self, x): |
|
return self.model(x) |
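
# Worked note, illustrative and not from the original file: the encoder's total
# downsampling factor is the product of its ratios, 8 * 5 * 4 * 2 = 320 samples
# per latent frame with the defaults; `test()` below exercises the full round trip.
def _example_encoder_hop_length():
    encoder = SEANetEncoder()
    assert encoder.hop_length == 320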
|
|
|
|
|
class SEANetDecoder(nn.Module): |
|
"""SEANet decoder. |
|
Args: |
|
channels (int): Audio channels. |
|
dimension (int): Intermediate representation dimension. |
|
n_filters (int): Base width for the model. |
|
        n_residual_layers (int): Number of residual layers.
|
ratios (Sequence[int]): kernel size and stride ratios |
|
activation (str): Activation function. |
|
activation_params (dict): Parameters to provide to the activation function |
|
final_activation (str): Final activation function after all convolutions. |
|
        final_activation_params (dict): Parameters to provide to the final activation function.
|
norm (str): Normalization method. |
|
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. |
|
kernel_size (int): Kernel size for the initial convolution. |
|
        last_kernel_size (int): Kernel size for the last convolution.
|
residual_kernel_size (int): Kernel size for the residual layers. |
|
dilation_base (int): How much to increase the dilation with each layer. |
|
causal (bool): Whether to use fully causal convolution. |
|
pad_mode (str): Padding mode for the convolutions. |
|
true_skip (bool): Whether to use true skip connection or a simple |
|
(streamable) convolution as the skip connection in the residual network blocks. |
|
compress (int): Reduced dimensionality in residual branches (from Demucs v3). |
|
        lstm (int): Number of LSTM layers at the start of the decoder.
|
trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. |
|
If equal to 1.0, it means that all the trimming is done at the right. |
|
""" |
|
|
|
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 1, |
|
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, |
|
final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None, |
|
norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, |
|
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, |
|
pad_mode: str = 'reflect', true_skip: bool = False, compress: int = 2, lstm: int = 2, |
|
trim_right_ratio: float = 1.0): |
|
super().__init__() |
|
self.dimension = dimension |
|
self.channels = channels |
|
self.n_filters = n_filters |
|
self.ratios = ratios |
|
del ratios |
|
self.n_residual_layers = n_residual_layers |
|
self.hop_length = np.prod(self.ratios) |
|
|
|
act = getattr(nn, activation) |
|
mult = int(2 ** len(self.ratios)) |
|
model: tp.List[nn.Module] = [ |
|
SConv1d(dimension, mult * n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode) |
|
] |
|
|
|
if lstm: |
|
model += [SLSTM(mult * n_filters, num_layers=lstm)] |
|
|
|
|
|
        # Upsample to raw audio scale
        for i, ratio in enumerate(self.ratios):
            # Add upsampling layers
            model += [
|
act(**activation_params), |
|
SConvTranspose1d(mult * n_filters, mult * n_filters // 2, |
|
kernel_size=ratio * 2, stride=ratio, |
|
norm=norm, norm_kwargs=norm_params, |
|
causal=causal, trim_right_ratio=trim_right_ratio), |
|
] |
|
|
|
            # Add residual layers
            for j in range(n_residual_layers):
|
model += [ |
|
SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1], |
|
dilations=[dilation_base ** j, 1], |
|
activation=activation, activation_params=activation_params, |
|
norm=norm, norm_params=norm_params, causal=causal, |
|
pad_mode=pad_mode, compress=compress, true_skip=true_skip)] |
|
|
|
mult //= 2 |
|
|
|
|
|
        # Add final layers
        model += [
|
act(**activation_params), |
|
SConv1d(n_filters, channels, last_kernel_size, norm=norm, norm_kwargs=norm_params, |
|
causal=causal, pad_mode=pad_mode) |
|
] |
|
|
|
if final_activation is not None: |
|
final_act = getattr(nn, final_activation) |
|
final_activation_params = final_activation_params or {} |
|
model += [ |
|
final_act(**final_activation_params) |
|
] |
|
self.model = nn.Sequential(*model) |
|
|
|
def forward(self, z): |
|
y = self.model(z) |
|
return y |
|
|
|
|
|
def test():
    encoder = SEANetEncoder()
    decoder = SEANetDecoder()
    x = torch.randn(1, 1, 24000)
    z = encoder(x)
    assert list(z.shape) == [1, 128, 75], z.shape
    y = decoder(z)
    assert y.shape == x.shape, (x.shape, y.shape)
|
|
|
|
|
if __name__ == '__main__': |
|
test() |
|
|