import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    Dynamic range compression using the natural logarithm.

    Args:
        x (torch.Tensor): Input tensor.
        C (float, optional): Scaling factor. Defaults to 1.
        clip_val (float, optional): Minimum value for clamping. Defaults to 1e-5.
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    Dynamic range decompression using exp.

    Args:
        x (torch.Tensor): Input tensor.
        C (float, optional): Scaling factor. Defaults to 1.
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    """
    Spectral normalization using dynamic range compression.

    Args:
        magnitudes (torch.Tensor): Magnitude spectrogram.
    """
    return dynamic_range_compression_torch(magnitudes)


def spectral_de_normalize_torch(magnitudes):
    """
    Spectral de-normalization using dynamic range decompression.

    Args:
        magnitudes (torch.Tensor): Normalized spectrogram.
    """
    return dynamic_range_decompression_torch(magnitudes)


# Module-level caches so that Hann windows and mel filterbanks are built only
# once per (size, dtype, device) combination.
mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, hop_size, win_size, center=False):
    """
    Compute the magnitude spectrogram of a signal using the STFT.

    Args:
        y (torch.Tensor): Input signal of shape (B, T).
        n_fft (int): FFT window size.
        hop_size (int): Hop size between frames.
        win_size (int): Window size.
        center (bool, optional): Whether torch.stft centers each frame. Defaults to False.
    """
    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
            dtype=y.dtype, device=y.device
        )

    # Manually reflection-pad the waveform (center defaults to False, so
    # torch.stft adds no padding of its own).
    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft=n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=True,
    )
    spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)
    return spec
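
# Usage sketch for spectrogram_torch. The n_fft / hop_size / win_size values
# below are illustrative assumptions, not settings taken from a config:
#
#   y = torch.randn(1, 24000)                      # (B, T), 1 s of audio at 24 kHz
#   spec = spectrogram_torch(y, n_fft=1024, hop_size=256, win_size=1024)
#   # spec.shape == (1, 513, 93), i.e. (B, n_fft // 2 + 1, frames)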


def spec_to_mel_torch(spec, n_fft, num_mels, sample_rate, fmin, fmax):
    """
    Convert a spectrogram to a mel-spectrogram.

    Args:
        spec (torch.Tensor): Magnitude spectrogram.
        n_fft (int): FFT window size.
        num_mels (int): Number of mel frequency bins.
        sample_rate (int): Sampling rate of the audio signal.
        fmin (float): Minimum frequency.
        fmax (float): Maximum frequency.
    """
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(
            sr=sample_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
        )
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
            dtype=spec.dtype, device=spec.device
        )
    melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    melspec = spectral_normalize_torch(melspec)
    return melspec
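
# Usage sketch for spec_to_mel_torch, continuing the example above (the
# parameter values are assumptions for illustration):
#
#   mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=80, sample_rate=24000, fmin=0, fmax=None)
#   # mel.shape == (1, 80, 93): the n_fft // 2 + 1 frequency bins are projected
#   # onto 80 mel bands and then log-compressed.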


def mel_spectrogram_torch(
    y, n_fft, num_mels, sample_rate, hop_size, win_size, fmin, fmax, center=False
):
    """
    Compute the mel-spectrogram of a signal.

    Args:
        y (torch.Tensor): Input signal.
        n_fft (int): FFT window size.
        num_mels (int): Number of mel frequency bins.
        sample_rate (int): Sampling rate of the audio signal.
        hop_size (int): Hop size between frames.
        win_size (int): Window size.
        fmin (float): Minimum frequency.
        fmax (float): Maximum frequency.
        center (bool, optional): Whether to center the window. Defaults to False.
    """
    spec = spectrogram_torch(y, n_fft, hop_size, win_size, center)
    melspec = spec_to_mel_torch(spec, n_fft, num_mels, sample_rate, fmin, fmax)
    return melspec
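
# End-to-end usage sketch; the hyperparameters below are illustrative assumptions:
#
#   y = torch.randn(1, 24000)                      # 1 s of audio at 24 kHz
#   mel = mel_spectrogram_torch(
#       y, n_fft=1024, num_mels=80, sample_rate=24000,
#       hop_size=256, win_size=1024, fmin=0, fmax=None,
#   )
#   # mel.shape == (1, 80, 93), i.e. (B, num_mels, frames)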


def compute_window_length(n_mels: int, sample_rate: int):
    """
    Derive an STFT window length from the number of mel bands:
    8 * n_mels / (f_max - f_min) seconds of audio (i.e. 16 * n_mels samples),
    rounded down to the nearest power of two.
    """
    f_min = 0
    f_max = sample_rate / 2
    window_length_seconds = 8 * n_mels / (f_max - f_min)
    window_length = int(window_length_seconds * sample_rate)
    return 2 ** (window_length.bit_length() - 1)
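
# Worked example: for n_mels=80 and sample_rate=24000, window_length_seconds is
# 8 * 80 / 12000 ≈ 0.0533 s, so window_length = 1280 samples, which rounds down
# to 2 ** 10 = 1024.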


class MultiScaleMelSpectrogramLoss(torch.nn.Module):
    def __init__(
        self,
        sample_rate: int = 24000,
        n_mels: list[int] = [5, 10, 20, 40, 80, 160, 320, 480],
        loss_fn=torch.nn.L1Loss(),
    ):
        super().__init__()
        self.sample_rate = sample_rate
        self.loss_fn = loss_fn
        self.log_base = torch.log(torch.tensor(10.0))
        self.hann_window: dict[str, torch.Tensor] = {}
        self.mel_banks: dict[str, torch.Tensor] = {}
        # One (n_mels, window_length, hop_length) triple per mel resolution;
        # the hop is fixed to 10 ms of audio.
        self.stft_params: list[tuple] = [
            (mel, compute_window_length(mel, sample_rate), self.sample_rate // 100)
            for mel in n_mels
        ]

    def mel_spectrogram(
        self,
        wav: torch.Tensor,
        n_mels: int,
        window_length: int,
        hop_length: int,
    ):
        # Cache keys, one per (size, dtype, device) combination.
        dtype_device = str(wav.dtype) + "_" + str(wav.device)
        win_dtype_device = str(window_length) + "_" + dtype_device
        mel_dtype_device = str(n_mels) + "_" + dtype_device

        # Cache the Hann window.
        if win_dtype_device not in self.hann_window:
            self.hann_window[win_dtype_device] = torch.hann_window(
                window_length, device=wav.device, dtype=torch.float32
            )

        wav = wav.squeeze(1)  # -> (B, T)
        stft = torch.stft(
            wav.float(),
            n_fft=window_length,
            hop_length=hop_length,
            window=self.hann_window[win_dtype_device],
            return_complex=True,
        )  # -> (B, window_length // 2 + 1, T // hop_length + 1), since center defaults to True
        magnitude = torch.sqrt(stft.real.pow(2) + stft.imag.pow(2) + 1e-6)

        # Cache the mel filterbank.
        if mel_dtype_device not in self.mel_banks:
            self.mel_banks[mel_dtype_device] = torch.from_numpy(
                librosa_mel_fn(
                    sr=self.sample_rate,
                    n_mels=n_mels,
                    n_fft=window_length,
                    fmin=0,
                    fmax=None,
                )
            ).to(device=wav.device, dtype=torch.float32)

        mel_spectrogram = torch.matmul(
            self.mel_banks[mel_dtype_device], magnitude
        )  # -> (B, n_mels, frames)
        return mel_spectrogram

    def forward(
        self, real: torch.Tensor, fake: torch.Tensor
    ):  # real: (B, 1, T), fake: (B, 1, T)
        loss = 0.0
        for p in self.stft_params:
            real_mels = self.mel_spectrogram(real, *p)
            fake_mels = self.mel_spectrogram(fake, *p)
            real_logmels = torch.log(real_mels.clamp(min=1e-5)) / self.log_base
            fake_logmels = torch.log(fake_mels.clamp(min=1e-5)) / self.log_base
            loss += self.loss_fn(real_logmels, fake_logmels)
        return loss
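

if __name__ == "__main__":
    # Smoke-test sketch: the batch size, signal length, and sample rate below are
    # illustrative placeholders, not values taken from a training configuration.
    msml = MultiScaleMelSpectrogramLoss(sample_rate=24000)
    real = torch.randn(2, 1, 24000)  # (B, 1, T) reference audio
    fake = torch.randn(2, 1, 24000)  # (B, 1, T) generated audio
    # Sums an L1 distance between log10-mel spectrograms over all mel resolutions.
    print(msml(real, fake))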