|
import librosa |
|
import librosa.filters |
|
import numpy as np |
|
from scipy import signal |
|
from wav2mel_hparams import hparams as hp |
|
from librosa.core.audio import resample |
|
import soundfile as sf |
|
|
|
def load_wav(path, sr):
    """Load an audio file, resampled to *sr*.

    Parameters:
        path: path to the audio file.
        sr: target sampling rate (or None to keep the native rate),
            passed straight through to librosa.

    Returns:
        (waveform, sampling_rate) tuple, as returned by librosa.
    """
    # librosa.load is the stable public API; the librosa.core namespace
    # is deprecated and was removed in librosa >= 0.10.
    return librosa.load(path, sr=sr)
|
|
|
def preemphasis(wav, k, preemphasize=True):
    """Apply the first-order pre-emphasis FIR filter y[n] = x[n] - k*x[n-1].

    When *preemphasize* is False the input is returned unchanged.
    """
    if not preemphasize:
        return wav
    return signal.lfilter([1, -k], [1], wav)
|
|
|
def inv_preemphasis(wav, k, inv_preemphasize=True):
    """Undo pre-emphasis with the IIR filter y[n] = x[n] + k*y[n-1].

    When *inv_preemphasize* is False the input is returned unchanged.
    """
    if not inv_preemphasize:
        return wav
    return signal.lfilter([1], [1, -k], wav)
|
|
|
def get_hop_size():
    """Return the STFT hop size in samples.

    Prefers an explicit hp.hop_size; otherwise derives it from
    hp.frame_shift_ms and hp.sample_rate.
    """
    if hp.hop_size is not None:
        return hp.hop_size
    assert hp.frame_shift_ms is not None
    return int(hp.frame_shift_ms / 1000 * hp.sample_rate)
|
|
|
def linearspectrogram(wav):
    """Compute a linear-frequency log-magnitude spectrogram of *wav*.

    The waveform is optionally pre-emphasized, transformed with the STFT,
    converted to dB relative to hp.ref_level_db, and normalized when
    hp.signal_normalization is set.
    """
    stft_out = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    spec = _amp_to_db(np.abs(stft_out)) - hp.ref_level_db
    return _normalize(spec) if hp.signal_normalization else spec
|
|
|
def melspectrogram(wav):
    """Compute a mel-scale log-magnitude spectrogram of *wav*.

    Same pipeline as linearspectrogram, with the STFT magnitudes
    projected onto the mel filterbank before the dB conversion.
    """
    stft_out = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    spec = _amp_to_db(_linear_to_mel(np.abs(stft_out))) - hp.ref_level_db
    return _normalize(spec) if hp.signal_normalization else spec
|
|
|
def _stft(y):
    """Short-time Fourier transform framed by the hyper-parameters."""
    hop = get_hop_size()
    return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=hop, win_length=hp.win_size)
|
|
|
|
|
|
|
def num_frames(length, fsize, fshift):
    """Return the number of spectrogram time frames for *length* samples,
    given frame size *fsize* and frame shift *fshift*, assuming
    (fsize - fshift) samples of padding on each side.
    """
    pad = fsize - fshift
    full = (length + 2 * pad - fsize) // fshift
    # One extra partial frame is needed unless length is an exact
    # multiple of the shift.
    extra = 1 if length % fshift == 0 else 2
    return full + extra
|
|
|
|
|
def pad_lr(x, fsize, fshift):
    """Return (left, right) padding so the padded signal exactly covers
    num_frames(len(x), fsize, fshift) frames.
    """
    pad = fsize - fshift
    frames = num_frames(len(x), fsize, fshift)
    padded_len = len(x) + 2 * pad
    # Extra samples needed on the right so the last frame fits fully.
    remainder = (frames - 1) * fshift + fsize - padded_len
    return pad, pad + remainder
|
|
|
|
|
def librosa_pad_lr(x, fsize, fshift):
    """Return (left, right) padding rounding len(x) up to a whole number
    of hops plus one. *fsize* is accepted for interface parity with
    pad_lr but is not used.
    """
    n = x.shape[0]
    right = (n // fshift + 1) * fshift - n
    return 0, right
|
|
|
|
|
# Lazily-built module-level cache of the mel filterbank matrix;
# populated on first use by _linear_to_mel.
_mel_basis = None
|
|
|
def _linear_to_mel(spectogram):
    """Project a linear-frequency magnitude spectrogram onto the mel scale.

    The filterbank is built once and cached in the module-level
    _mel_basis global.
    """
    global _mel_basis
    basis = _mel_basis
    if basis is None:
        basis = _build_mel_basis()
        _mel_basis = basis
    return np.dot(basis, spectogram)
|
|
|
def _build_mel_basis():
    """Construct the mel filterbank matrix from the hyper-parameters."""
    # The top mel band cannot exceed the Nyquist frequency.
    assert hp.fmax <= hp.sample_rate // 2
    return librosa.filters.mel(
        sr=hp.sample_rate,
        n_fft=hp.n_fft,
        n_mels=hp.num_mels,
        fmin=hp.fmin,
        fmax=hp.fmax,
    )
|
|
|
def _amp_to_db(x):
    """Convert linear amplitude to decibels, floored at hp.min_level_db."""
    # Amplitude corresponding to the dB floor: 10 ** (min_level_db / 20).
    floor = np.exp(hp.min_level_db / 20 * np.log(10))
    return 20 * np.log10(np.maximum(floor, x))
|
|
|
def _db_to_amp(x): |
|
return np.power(10.0, (x) * 0.05) |
|
|
|
def _normalize(S):
    """Scale a dB spectrogram into the model's value range.

    With hp.symmetric_mels the output spans [-max_abs_value, max_abs_value],
    otherwise [0, max_abs_value]. Values are clipped only when
    hp.allow_clipping_in_normalization is set; otherwise the input is
    asserted to already lie in [min_level_db, 0].
    """
    # Fraction of the dB range above the floor, in [0, 1] for in-range input.
    scaled = (S - hp.min_level_db) / (-hp.min_level_db)
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            return np.clip((2 * hp.max_abs_value) * scaled - hp.max_abs_value,
                           -hp.max_abs_value, hp.max_abs_value)
        return np.clip(hp.max_abs_value * scaled, 0, hp.max_abs_value)

    assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
    if hp.symmetric_mels:
        return (2 * hp.max_abs_value) * scaled - hp.max_abs_value
    return hp.max_abs_value * scaled
|
|
|
def _denormalize(D):
    """Map model-range values back to dB (inverse of _normalize)."""
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            clipped = np.clip(D, -hp.max_abs_value, hp.max_abs_value)
            return ((clipped + hp.max_abs_value) * -hp.min_level_db
                    / (2 * hp.max_abs_value)) + hp.min_level_db
        clipped = np.clip(D, 0, hp.max_abs_value)
        return (clipped * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db

    if hp.symmetric_mels:
        return ((D + hp.max_abs_value) * -hp.min_level_db
                / (2 * hp.max_abs_value)) + hp.min_level_db
    return (D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db
|
|
|
|
|
|
|
def wav2mel(wav, sr):
    """Convert a waveform into a list of fixed-width mel-spectrogram chunks.

    The waveform is resampled to 16 kHz, converted to a mel spectrogram,
    and sliced into windows of ``mel_step_size`` mel frames centred on
    positions spaced ``mel_idx_multiplier`` frames apart (80/25 — presumably
    one chunk per 25 fps video frame; TODO confirm against the caller).

    Parameters:
        wav: waveform array accepted by librosa's resample.
        sr: sampling rate of *wav*.

    Returns:
        list of mel-spectrogram slices (one per chunk index).

    Raises:
        ValueError: if the mel spectrogram contains NaN values.
    """
    wav16k = resample(wav, orig_sr=sr, target_sr=16000)

    mel = melspectrogram(wav16k)

    # Guard against NaNs before slicing; downstream consumers cannot use them.
    if np.isnan(mel.reshape(-1)).sum() > 0:
        raise ValueError(
            'Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')

    mel_chunks = []
    mel_idx_multiplier = 80. / 25  # mel frames advanced per chunk index
    mel_step_size = 8              # width of each chunk, in mel frames
    i = start_idx = 0
    # NOTE(review): the condition tests the start_idx from the *previous*
    # iteration, so the loop emits one more chunk after start_idx reaches
    # the end — this ordering appears deliberate; do not reorder.
    while start_idx < len(mel[0]):
        start_idx = int(i * mel_idx_multiplier)
        if start_idx + mel_step_size // 2 > len(mel[0]):
            # Window would overrun the end: take the last mel_step_size frames.
            mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
        elif start_idx - mel_step_size // 2 < 0:
            # Window would start before 0: take the first mel_step_size frames.
            mel_chunks.append(mel[:, :mel_step_size])
        else:
            mel_chunks.append(mel[:, start_idx - mel_step_size // 2 : start_idx + mel_step_size // 2])
        i += 1
    return mel_chunks
|
|
|
|
|
|
|
if __name__ == '__main__':
    import argparse
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument('--wav', type=str, default='',
                        help='path to the input wav file')
    parser.add_argument('--save_feats', action='store_true',
                        help='save the mel chunks next to the input as <name>_mel.npy')

    opt = parser.parse_args()

    # librosa.load is the stable API (librosa.core was removed in 0.10).
    # Default resampling is kept; wav2mel resamples to 16 kHz regardless.
    wav, sr = librosa.load(opt.wav)
    mel_chunks = np.array(wav2mel(wav.T, sr))
    print(mel_chunks.shape, mel_chunks.transpose(0, 2, 1).shape)

    if opt.save_feats:
        # splitext is robust to '.wav' appearing elsewhere in the path,
        # unlike str.replace on the full path.
        save_path = os.path.splitext(opt.wav)[0] + '_mel.npy'
        np.save(save_path, mel_chunks.transpose(0, 2, 1))
        # Message fixed: these are mel features, not logits.
        print(f"[INFO] saved mel features to {save_path}")