from typing import Optional
import typing as tp

import torch
from torch import nn
from torch.nn.utils import weight_norm

from inspiremusic.wavtokenizer.decoder.modules import ConvNeXtBlock, ResBlock1, AdaLayerNorm

def nonlinearity(x):
    # swish / SiLU activation: x * sigmoid(x)
    return x * torch.sigmoid(x)

def Normalize(in_channels, num_groups=32):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)

class ResnetBlock(nn.Module):
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv1d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x, temb=None):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            # This module operates on 3D (B, C, L) tensors, so the timestep
            # embedding is broadcast over the length axis only; the original
            # [:, :, None, None] indexing was a leftover from a 2D variant
            # and would fail on 3D inputs.
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h

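
# A minimal usage sketch (not part of the original module): ResnetBlock preserves
# the temporal length, so input and output differ only in channel count; with
# temb_channels=0 the timestep-embedding path is skipped entirely. All sizes
# below are illustrative assumptions.
def _demo_resnet_block():
    block = ResnetBlock(in_channels=64, out_channels=128, dropout=0.1, temb_channels=0)
    x = torch.randn(2, 64, 100)  # (batch, channels, length)
    y = block(x)                 # channel mismatch routes x through the 1x1 nin_shortcut
    assert y.shape == (2, 128, 100)
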
class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv1d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = torch.nn.Conv1d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = torch.nn.Conv1d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = torch.nn.Conv1d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention over the length axis
        # (the original "hw" comments were leftovers from a 2D image variant)
        b, c, t = q.shape
        q = q.permute(0, 2, 1)                       # (b, t, c)
        w_ = torch.bmm(q, k)                         # (b, t, t); w_[b, i, j] = sum_c q[b, i, c] * k[b, c, j]
        w_ = w_ * (int(c) ** (-0.5))                 # scale by 1/sqrt(c)
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        w_ = w_.permute(0, 2, 1)                     # (b, t, t); first t indexes k, second indexes q
        h_ = torch.bmm(v, w_)                        # (b, c, t); h_[b, c, j] = sum_i v[b, c, i] * w_[b, i, j]
        h_ = self.proj_out(h_)

        return x + h_

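
# A minimal usage sketch (not part of the original module): AttnBlock is
# shape-preserving self-attention along the sequence axis. in_channels must be
# divisible by the GroupNorm group count (32 here); sizes are assumptions.
def _demo_attn_block():
    attn = AttnBlock(in_channels=64)
    x = torch.randn(2, 64, 100)  # (batch, channels, length)
    y = attn(x)
    assert y.shape == x.shape    # residual self-attention preserves the shape
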
def make_attn(in_channels, attn_type="vanilla"):
    assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown"
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    # "linear" and "none" pass the assert but are not implemented here;
    # fail loudly instead of silently returning None.
    raise NotImplementedError(f"attn_type '{attn_type}' is not implemented")

class Backbone(nn.Module):
    """Base class for the generator's backbone. It preserves the same temporal resolution across all layers."""

    def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
        """
        Args:
            x (Tensor): Input tensor of shape (B, C, L), where B is the batch size,
                C denotes input feature channels, and L is the sequence length.

        Returns:
            Tensor: Output of shape (B, L, H), where B is the batch size, L is the sequence length,
                and H denotes the model dimension.
        """
        raise NotImplementedError("Subclasses must implement the forward method.")

class VocosBackbone(Backbone):
    """
    Vocos backbone module built with ConvNeXt blocks. Supports additional conditioning with Adaptive Layer Normalization.

    Args:
        input_channels (int): Number of input feature channels.
        dim (int): Hidden dimension of the model.
        intermediate_dim (int): Intermediate dimension used in ConvNeXtBlock.
        num_layers (int): Number of ConvNeXtBlock layers.
        layer_scale_init_value (float, optional): Initial value for layer scaling. Defaults to `1 / num_layers`.
        adanorm_num_embeddings (int, optional): Number of embeddings for AdaLayerNorm.
            None means non-conditional model. Defaults to None.
    """
    def __init__(
        self,
        input_channels: int,
        dim: int,
        intermediate_dim: int,
        num_layers: int,
        layer_scale_init_value: Optional[float] = None,
        adanorm_num_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.input_channels = input_channels
        self.embed = nn.Conv1d(input_channels, dim, kernel_size=7, padding=3)
        self.adanorm = adanorm_num_embeddings is not None
        if adanorm_num_embeddings:
            self.norm = AdaLayerNorm(adanorm_num_embeddings, dim, eps=1e-6)
        else:
            self.norm = nn.LayerNorm(dim, eps=1e-6)
        layer_scale_init_value = layer_scale_init_value or 1 / num_layers
        self.convnext = nn.ModuleList(
            [
                ConvNeXtBlock(
                    dim=dim,
                    intermediate_dim=intermediate_dim,
                    layer_scale_init_value=layer_scale_init_value,
                    adanorm_num_embeddings=adanorm_num_embeddings,
                )
                for _ in range(num_layers)
            ]
        )
        self.final_layer_norm = nn.LayerNorm(dim, eps=1e-6)
        self.apply(self._init_weights)

        # Positional network: a small diffusion-style stack of ResNet blocks and
        # self-attention applied right after the embedding convolution.
        self.temb_ch = 0
        block_in = dim
        dropout = 0.1
        attn_type = "vanilla"
        pos_net: tp.List[nn.Module] = [
            ResnetBlock(in_channels=block_in, out_channels=block_in,
                        temb_channels=self.temb_ch, dropout=dropout),
            ResnetBlock(in_channels=block_in, out_channels=block_in,
                        temb_channels=self.temb_ch, dropout=dropout),
            make_attn(block_in, attn_type=attn_type),
            ResnetBlock(in_channels=block_in, out_channels=block_in,
                        temb_channels=self.temb_ch, dropout=dropout),
            ResnetBlock(in_channels=block_in, out_channels=block_in,
                        temb_channels=self.temb_ch, dropout=dropout),
            Normalize(block_in),
        ]
        self.pos_net = nn.Sequential(*pos_net)

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            nn.init.trunc_normal_(m.weight, std=0.02)
            nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor, bandwidth_id: Optional[torch.Tensor] = None) -> torch.Tensor:
        x = self.embed(x)
        x = self.pos_net(x)
        if self.adanorm:
            if bandwidth_id is None:
                # Fall back to embedding 0; allocate it on the input's device
                # rather than hard-coding 'cuda' so CPU inference also works.
                bandwidth_id = torch.tensor(0, device=x.device)
            x = self.norm(x.transpose(1, 2), cond_embedding_id=bandwidth_id)
        else:
            x = self.norm(x.transpose(1, 2))
        x = x.transpose(1, 2)
        for conv_block in self.convnext:
            x = conv_block(x, cond_embedding_id=bandwidth_id)
        x = self.final_layer_norm(x.transpose(1, 2))
        return x

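
# A minimal usage sketch (not part of the original module), assuming the default
# non-conditional configuration (adanorm_num_embeddings=None) and that
# ConvNeXtBlock accepts cond_embedding_id=None in that case. Sizes are
# illustrative; dim must be divisible by the GroupNorm group count (32).
def _demo_vocos_backbone():
    backbone = VocosBackbone(input_channels=100, dim=512, intermediate_dim=1536, num_layers=8)
    feats = torch.randn(1, 100, 200)  # (B, C, L) input features
    out = backbone(feats)             # (B, L, H) per the Backbone contract
    assert out.shape == (1, 200, 512)
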
class VocosResNetBackbone(Backbone):
    """
    Vocos backbone module built with ResBlocks.

    Args:
        input_channels (int): Number of input feature channels.
        dim (int): Hidden dimension of the model.
        num_blocks (int): Number of ResBlock1 blocks.
        layer_scale_init_value (float, optional): Initial value for layer scaling.
            Defaults to `1 / num_blocks / 3`.
    """
    def __init__(
        self, input_channels, dim, num_blocks, layer_scale_init_value=None,
    ):
        super().__init__()
        self.input_channels = input_channels
        self.embed = weight_norm(nn.Conv1d(input_channels, dim, kernel_size=3, padding=1))
        layer_scale_init_value = layer_scale_init_value or 1 / num_blocks / 3
        self.resnet = nn.Sequential(
            *[ResBlock1(dim=dim, layer_scale_init_value=layer_scale_init_value) for _ in range(num_blocks)]
        )

    def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
        x = self.embed(x)
        x = self.resnet(x)
        x = x.transpose(1, 2)
        return x
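
# A minimal usage sketch (not part of the original module), assuming ResBlock1
# preserves the (B, dim, L) shape as its residual design suggests. Sizes are
# illustrative assumptions.
def _demo_vocos_resnet_backbone():
    backbone = VocosResNetBackbone(input_channels=100, dim=512, num_blocks=3)
    feats = torch.randn(1, 100, 200)  # (B, C, L) input features
    out = backbone(feats)             # (B, L, H) per the Backbone contract
    assert out.shape == (1, 200, 512)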