|
import os

import librosa
import numpy as np
import soundfile as sf
import gradio as gr
from tensorflow.keras.models import model_from_json
|
|
|
def audio_to_audio_frame_stack(sound_data, frame_length, hop_length_frame):
    """Split an audio signal into frames and return them as a numpy matrix
    of shape (nb_frame, frame_length)."""
    sequence_sample_length = sound_data.shape[0]
    # Guard against signals shorter than one frame, which would otherwise
    # make np.vstack fail on an empty list.
    if sequence_sample_length < frame_length:
        return np.empty((0, frame_length))
    sound_data_list = [
        sound_data[start:start + frame_length]
        for start in range(0, sequence_sample_length - frame_length + 1, hop_length_frame)
    ]
    return np.vstack(sound_data_list)
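
# Example with hypothetical values: one second of audio at 8 kHz, framed with
# frame_length == hop_length_frame == 8000, yields a single non-overlapping frame:
#   frames = audio_to_audio_frame_stack(np.zeros(8000), 8000, 8000)
#   frames.shape  # -> (1, 8000)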
|
|
|
def audio_files_to_numpy(audio_dir, list_audio_files, sample_rate, frame_length, hop_length_frame, min_duration):
    """Load the given audio files, frame each one with a sliding window of
    hop_length_frame, and stack all frames into a single numpy matrix of
    shape (nb_frame, frame_length)."""
    list_sound_array = []
    for file in list_audio_files:
        # librosa.load resamples to the requested sample_rate and returns mono audio.
        y, sr = librosa.load(os.path.join(audio_dir, file), sr=sample_rate)
        total_duration = librosa.get_duration(y=y, sr=sr)

        if total_duration >= min_duration:
            frames = audio_to_audio_frame_stack(y, frame_length, hop_length_frame)
            if frames.size > 0:
                list_sound_array.append(frames)
        else:
            print(f"The following file {os.path.join(audio_dir, file)} is below the min duration")

    return np.vstack(list_sound_array) if list_sound_array else np.array([])
|
|
|
def blend_noise_randomly(voice, noise, nb_samples, frame_length):
    """Randomly blend voice frames with noise frames, returning the clean
    voice, the scaled noise, and their noisy mixture (each of shape
    (nb_samples, frame_length))."""
    prod_voice = np.zeros((nb_samples, frame_length))
    prod_noise = np.zeros((nb_samples, frame_length))
    prod_noisy_voice = np.zeros((nb_samples, frame_length))

    for i in range(nb_samples):
        # Pick a random voice frame, a random noise frame, and a random noise level.
        id_voice = np.random.randint(0, voice.shape[0])
        id_noise = np.random.randint(0, noise.shape[0])
        level_noise = np.random.uniform(0.2, 0.8)
        prod_voice[i, :] = voice[id_voice, :]
        prod_noise[i, :] = level_noise * noise[id_noise, :]
        prod_noisy_voice[i, :] = prod_voice[i, :] + prod_noise[i, :]

    return prod_voice, prod_noise, prod_noisy_voice
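
# Example with hypothetical inputs: given stacks of voice and noise frames of
# 8064 samples each, drawing 5 random mixtures:
#   v, n, nv = blend_noise_randomly(voice_frames, noise_frames, 5, 8064)
#   nv.shape  # -> (5, 8064), with nv == v + n elementwise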
|
|
|
def audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio):
    """Convert an audio frame into a spectrogram, returning the magnitude in
    dB and the phase."""
    stftaudio = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft)
    stftaudio_magnitude, stftaudio_phase = librosa.magphase(stftaudio)
    stftaudio_magnitude_db = librosa.amplitude_to_db(stftaudio_magnitude, ref=np.max)
    return stftaudio_magnitude_db, stftaudio_phase
|
|
|
def numpy_audio_to_matrix_spectrogram(numpy_audio, dim_square_spec, n_fft, hop_length_fft):
    """Take a numpy array of shape (nb_frame, frame_length) and return the
    magnitude (in dB) and phase spectrograms, each of shape
    (nb_frame, dim_square_spec, dim_square_spec)."""
    nb_audio = numpy_audio.shape[0]
    m_mag_db = np.zeros((nb_audio, dim_square_spec, dim_square_spec))
    # The phase matrix must be complex: librosa.magphase returns the phase as
    # unit-magnitude complex numbers.
    m_phase = np.zeros((nb_audio, dim_square_spec, dim_square_spec), dtype=complex)

    for i in range(nb_audio):
        m_mag_db[i, :, :], m_phase[i, :, :] = audio_to_magnitude_db_and_phase(
            n_fft, hop_length_fft, numpy_audio[i])
    return m_mag_db, m_phase
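
# Shape check for the parameters used in this app: with n_fft = 255 the STFT
# has 255 // 2 + 1 = 128 frequency bins, and a centered STFT of an 8064-sample
# frame with hop_length_fft = 63 yields 1 + (8064 + 254 - 255) // 63 = 128 time
# frames, i.e. a square 128 x 128 spectrogram, which is what dim_square_spec
# must equal.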
|
|
|
def magnitude_db_and_phase_to_audio(frame_length, hop_length_fft, stftaudio_magnitude_db, stftaudio_phase):
    """Revert a dB magnitude spectrogram and its phase back to audio."""
    stftaudio_magnitude_rev = librosa.db_to_amplitude(stftaudio_magnitude_db, ref=1.0)
    # Recombine magnitude and phase into a complex STFT before inverting it.
    audio_reverse_stft = stftaudio_magnitude_rev * stftaudio_phase
    audio_reconstruct = librosa.istft(audio_reverse_stft, hop_length=hop_length_fft, length=frame_length)
    return audio_reconstruct
|
|
|
def matrix_spectrogram_to_numpy_audio(m_mag_db, m_phase, frame_length, hop_length_fft):
    """Revert a batch of spectrograms to a stacked numpy audio array of shape
    (nb_spec, frame_length)."""
    list_audio = []
    nb_spec = m_mag_db.shape[0]

    for i in range(nb_spec):
        audio_reconstruct = magnitude_db_and_phase_to_audio(
            frame_length, hop_length_fft, m_mag_db[i], m_phase[i])
        list_audio.append(audio_reconstruct)
    return np.vstack(list_audio)
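
# Because istft is called with length=frame_length, every reconstructed frame
# has exactly frame_length samples, so the frames stack cleanly. A round trip
# (hypothetical values matching this app) preserves shape:
#   mag_db, phase = numpy_audio_to_matrix_spectrogram(frames, 128, 255, 63)
#   rebuilt = matrix_spectrogram_to_numpy_audio(mag_db, phase, 8064, 63)
#   rebuilt.shape == frames.shape  # -> True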
|
|
|
def scaled_in(matrix_spec):
    """Global scaling applied to noisy-voice spectrograms (maps the dB values
    roughly into [-1, 1])."""
    return (matrix_spec + 46) / 50


def scaled_ou(matrix_spec):
    """Global scaling applied to noise-model spectrograms (maps the dB values
    roughly into [-1, 1])."""
    return (matrix_spec - 6) / 82


def inv_scaled_in(matrix_spec):
    """Inverse of the global scaling applied to noisy-voice spectrograms."""
    return matrix_spec * 50 - 46


def inv_scaled_ou(matrix_spec):
    """Inverse of the global scaling applied to noise-model spectrograms."""
    return matrix_spec * 82 + 6
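
# The constants appear to be fixed statistics of the training data; each pair
# is an exact algebraic inverse:
#   inv_scaled_in(scaled_in(x)) == x   since ((x + 46) / 50) * 50 - 46 == x
#   inv_scaled_ou(scaled_ou(x)) == x   since ((x - 6) / 82) * 82 + 6 == x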
|
|
|
def prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction,
               audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft, hop_length_fft):
    """Use pretrained weights to denoise a noisy voice audio file and save the result."""

    # Load the model architecture from JSON and its weights from HDF5.
    with open(os.path.join(weights_path, name_model + '.json'), 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(os.path.join(weights_path, name_model + '.h5'))
    print("Loaded model from disk")

    # Load and frame the input audio.
    audio = audio_files_to_numpy(
        audio_dir_prediction,
        audio_input_prediction,
        sample_rate,
        frame_length,
        hop_length_frame,
        min_duration
    )

    if audio.size == 0:
        print("No valid audio frames found, skipping prediction.")
        return

    # Number of frequency bins of the STFT; the parameters are chosen so the
    # spectrogram is square (see the shape check above).
    dim_square_spec = n_fft // 2 + 1

    m_amp_db_audio, m_pha_audio = numpy_audio_to_matrix_spectrogram(audio, dim_square_spec, n_fft, hop_length_fft)

    # Scale the input and add the channel dimension expected by the U-Net.
    X_in = scaled_in(m_amp_db_audio)
    X_in = X_in.reshape(X_in.shape[0], X_in.shape[1], X_in.shape[2], 1)

    # The model predicts the (scaled) noise spectrogram.
    X_pred = loaded_model.predict(X_in)
    inv_sca_X_pred = inv_scaled_ou(X_pred)

    # Subtract the predicted noise from the noisy spectrogram to keep the voice.
    X_denoise = m_amp_db_audio - inv_sca_X_pred[:, :, :, 0]

    # Reconstruct audio from the denoised magnitude and the original phase.
    audio_denoise_recons = matrix_spectrogram_to_numpy_audio(X_denoise, m_pha_audio, frame_length, hop_length_fft)

    # Concatenate the frames into one long signal; the factor of 10 is an
    # empirical output gain.
    nb_samples = audio_denoise_recons.shape[0]
    denoise_long = audio_denoise_recons.reshape(1, nb_samples * frame_length) * 10

    output_path = os.path.join(dir_save_prediction, audio_output_prediction)
    sf.write(output_path, denoise_long[0, :], sample_rate)
    print(f"Saved denoised audio to: {output_path}")
|
|
|
def denoise_audio(audio_input):
    """Gradio callback to denoise audio.

    `audio_input` can be None, a dict {"name", "sample_rate", "data"},
    or a tuple (sample_rate, data).
    """
    if audio_input is None:
        print("No audio was provided.")
        return None

    # Normalize the two input formats Gradio may hand us.
    if isinstance(audio_input, dict):
        sr = audio_input["sample_rate"]
        data = audio_input["data"]
    else:
        sr, data = audio_input

    # Write the incoming audio to a temporary WAV file so the prediction
    # pipeline can load it from disk.
    temp_wav = "temp.wav"
    sf.write(temp_wav, data, sr)

    # Duration of the uploaded clip in seconds; passed as min_duration below.
    t = len(data) / sr
    print(f"Input duration: {t:.2f} s")

    # Parameters matching those the U-Net was trained with.
    weights_path = os.path.abspath("./")
    name_model = "model_unet"
    audio_dir_prediction = os.path.abspath("./")
    dir_save_prediction = os.path.abspath("./")
    audio_output_prediction = "test.wav"
    audio_input_prediction = [temp_wav]
    sample_rate = 8000
    min_duration = t
    frame_length = 8064
    hop_length_frame = 8064
    n_fft = 255
    hop_length_fft = 63

    prediction(weights_path, name_model,
               audio_dir_prediction,
               dir_save_prediction,
               audio_input_prediction,
               audio_output_prediction,
               sample_rate,
               min_duration,
               frame_length,
               hop_length_frame,
               n_fft,
               hop_length_fft)

    return os.path.join(dir_save_prediction, audio_output_prediction)
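
# Quick local check without the web UI (hypothetical input: 2 s of silence
# at 8 kHz, long enough for one 8064-sample frame):
#   out_path = denoise_audio((8000, np.zeros(16000, dtype=np.float32)))
#   out_path  # -> path of the denoised WAV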
|
|
|
|
|
examples = [
    [os.path.abspath("crowdNoise.wav")],
    [os.path.abspath("CrowdNoise2.wav")],
    [os.path.abspath("whiteNoise.wav")]
]
|
|
|
iface = gr.Interface(
    fn=denoise_audio,
    inputs="audio",
    outputs="audio",
    title="Audio to Denoised Audio Application",
    description=(
        "A simple application to denoise speech audio using a U-Net model. "
        "Upload your own audio or click one of the examples to load it."
    ),
    article="""
    <div style="text-align: center">
        <p>All you need to do is upload or record an audio file and hit 'Submit'.
        After processing, you can click 'Play' to hear the denoised audio.
        The audio is saved in WAV format.</p>
    </div>
    """,
    examples=examples
)

iface.launch()
|
|