# MIT License
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Copyright (c) [2023] [Meta Platforms, Inc. and affiliates.]
# Copyright (c) [2025] [Ziyue Jiang]
# SPDX-License-Identifier: MIT
#
# This file has been modified by Ziyue Jiang on 2025/03/19.
# The original file was released under MIT, with the full license text
# available at https://github.com/facebookresearch/encodec/blob/gh-pages/LICENSE.
# This modified file is released under the same license.

"""Encodec SEANet-based encoder and decoder implementation."""

import typing as tp

import numpy as np
import torch.nn as nn

from .conv import SConv1d
from .lstm import SLSTM


class SEANetResnetBlock(nn.Module):
    """SEANet residual block: a stack of dilated convolutions with a (true or convolutional) skip connection."""

    def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
                 activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
                 norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {},
                 causal: bool = False, pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
        super().__init__()
        assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
        act = getattr(nn, activation)
        hidden = dim // compress
        block = []
        for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
            in_chs = dim if i == 0 else hidden
            out_chs = dim if i == len(kernel_sizes) - 1 else hidden
            block += [
                act(**activation_params),
                SConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
                        norm=norm, norm_kwargs=norm_params,
                        causal=causal, pad_mode=pad_mode),
            ]
        self.block = nn.Sequential(*block)
        self.shortcut: nn.Module
        if true_skip:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = SConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
                                    causal=causal, pad_mode=pad_mode)

    def forward(self, x):
        return self.shortcut(x) + self.block(x)


class SEANetEncoder(nn.Module):
    """SEANet encoder: strided convolutional downsampling blocks, an optional LSTM, and a final projection to `dimension` channels."""

    def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 1,
                 ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU',
                 activation_params: dict = {'alpha': 1.0}, norm: str = 'weight_norm',
                 norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, last_kernel_size: int = 7,
                 residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
                 pad_mode: str = 'reflect', true_skip: bool = False, compress: int = 2, lstm: int = 2):
        super().__init__()
        self.channels = channels
        self.dimension = dimension
        self.n_filters = n_filters
        self.ratios = list(reversed(ratios))
        del ratios
        self.n_residual_layers = n_residual_layers
        self.hop_length = np.prod(self.ratios)

        act = getattr(nn, activation)
        mult = 1
        model: tp.List[nn.Module] = [
            SConv1d(channels, mult * n_filters, kernel_size, norm=norm, norm_kwargs=norm_params,
                    causal=causal, pad_mode=pad_mode)
        ]
        # Downsample to raw audio scale
        for i, ratio in enumerate(self.ratios):
            # Add residual layers
            for j in range(n_residual_layers):
                model += [
                    SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
                                      dilations=[dilation_base ** j, 1],
                                      norm=norm, norm_params=norm_params,
                                      activation=activation, activation_params=activation_params,
                                      causal=causal, pad_mode=pad_mode,
                                      compress=compress, true_skip=true_skip)]
            # Add downsampling layers
            model += [
                act(**activation_params),
                SConv1d(mult * n_filters, mult * n_filters * 2,
                        kernel_size=ratio * 2, stride=ratio,
                        norm=norm, norm_kwargs=norm_params,
                        causal=causal, pad_mode=pad_mode),
            ]
            mult *= 2

        if lstm:
            model += [SLSTM(mult * n_filters, num_layers=lstm)]

        model += [
            act(**activation_params),
            SConv1d(mult * n_filters, dimension, last_kernel_size, norm=norm, norm_kwargs=norm_params,
                    causal=causal, pad_mode=pad_mode)
        ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x)
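

# A minimal usage sketch (not part of the original module): it shows how the
# encoder maps a (batch, channels, time) waveform to a (batch, dimension, frames)
# latent sequence, where frames is roughly time / prod(ratios). The batch size,
# waveform length, and sample rate below are illustrative assumptions, and the
# call only works with the sibling SConv1d/SLSTM modules available.
def _example_usage():
    import torch

    encoder = SEANetEncoder(channels=1, dimension=128, ratios=[8, 5, 4, 2])
    wav = torch.randn(2, 1, 32000)  # e.g. two ~2 s mono clips at an assumed 16 kHz
    latents = encoder(wav)
    # With hop_length = 8 * 5 * 4 * 2 = 320, the output shape is roughly (2, 128, 100).
    print(latents.shape)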