# Voice / app.py
import gradio as gr
import numpy as np
import soundfile as sf
import spaces
import torch
import torchaudio
import librosa
import yaml
import tempfile
import os
from huggingface_hub import hf_hub_download
from transformers import AutoFeatureExtractor, WhisperModel
from modules.commons import build_model, load_checkpoint, recursive_munch
from modules.campplus.DTDNN import CAMPPlus
from modules.bigvgan import bigvgan
from modules.rmvpe import RMVPE
from modules.audio import mel_spectrogram
# ----------------------------
# Optimization Settings
# ----------------------------
# Set the number of threads to the number of CPU cores
torch.set_num_threads(os.cpu_count())
torch.set_num_interop_threads(os.cpu_count())
# Enable CPU-friendly backends and disable CUDA paths
torch.backends.mkldnn.enabled = True
torch.backends.cudnn.enabled = False
torch.set_grad_enabled(False)
# Force CPU usage and set the default dtype to float16 to reduce memory use
# (float16 kernel coverage on CPU is limited, so a few ops below run in float32 and cast back)
torch.set_default_dtype(torch.float16)
device = torch.device("cpu")
print(f"[DEVICE] | Using device: {device} with dtype {torch.get_default_dtype()}")
# ----------------------------
# Load Models and Configuration
# ----------------------------
def load_custom_model_from_hf(repo_id, model_filename="pytorch_model.bin", config_filename="config.yml"):
os.makedirs("./checkpoints", exist_ok=True)
model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, cache_dir="./checkpoints")
if config_filename is None:
return model_path
config_path = hf_hub_download(repo_id=repo_id, filename=config_filename, cache_dir="./checkpoints")
return model_path, config_path
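# Usage sketch (hypothetical repo/file names): pass config_filename=None to
# fetch a bare checkpoint, e.g.
#   ckpt = load_custom_model_from_hf("user/model", "model.bin", config_filename=None)
# Files are cached under ./checkpoints, so restarts reuse earlier downloads.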
# Load DiT model
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf(
"Plachta/Seed-VC",
"DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
"config_dit_mel_seed_uvit_whisper_small_wavenet.yml"
)
with open(dit_config_path, 'r') as f:
    config = yaml.safe_load(f)
model_params = recursive_munch(config['model_params'])
model = build_model(model_params, stage='DiT')
# Debug: Print model keys to identify correct key
print(f"[INFO] | Model keys: {model.keys()}")
hop_length = config['preprocess_params']['spect_params']['hop_length']
sr = config['preprocess_params']['sr']
# Load DiT checkpoints
model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model:
model[key] = model[key].eval().to(device).half()
print("[INFO] | DiT model loaded, set to eval mode, and converted to float16.")
model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
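# setup_caches appears to pre-allocate the DiT estimator's internal attention
# caches for a single utterance (batch 1) of up to 8192 positions; inference
# below chunks longer inputs so this window is never exceeded.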
# Ensure 'CAMPPlus' is correctly imported and defined
try:
campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
print("[INFO] | CAMPPlus model instantiated.")
except NameError:
print("[ERROR] | CAMPPlus is not defined. Please check the import path and ensure CAMPPlus is correctly defined.")
raise
campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
campplus_state = torch.load(campplus_ckpt_path, map_location="cpu")
campplus_model.load_state_dict(campplus_state)
campplus_model = campplus_model.eval().to(device).half()
print("[INFO] | CAMPPlus model loaded, set to eval mode, and converted to float16.")
# Load BigVGAN model
bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
bigvgan_model.remove_weight_norm()
bigvgan_model = bigvgan_model.eval().to(device).half()
print("[INFO] | BigVGAN model loaded, weight norm removed, set to eval mode, and converted to float16.")
# Load FAcodec model
ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
with open(config_path, 'r') as f:
    codec_config = yaml.safe_load(f)
codec_model_params = recursive_munch(codec_config['model_params'])
codec_encoder = build_model(codec_model_params, stage="codec")
ckpt_params = torch.load(ckpt_path, map_location="cpu")
for key in codec_encoder:
codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
codec_encoder = {k: v.eval().to(device).half() for k, v in codec_encoder.items()}
print("[INFO] | FAcodec model loaded, set to eval mode, and converted to float16.")
# Load Whisper model with float16 and compatible size
whisper_name = model_params.speech_tokenizer.whisper_name if hasattr(model_params.speech_tokenizer, 'whisper_name') else "openai/whisper-small"
whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float16).to(device)
del whisper_model.decoder # Remove decoder as it's not used
whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)
print(f"[INFO] | Whisper model '{whisper_name}' loaded with dtype {whisper_model.dtype} and moved to CPU.")
# Generate mel spectrograms with optimized parameters
mel_fn_args = {
"n_fft": 1024,
"win_size": 1024,
"hop_size": 256,
"num_mels": 80,
"sampling_rate": sr,
"fmin": 0,
"fmax": None,
"center": False
}
to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
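# Shape sketch, assuming modules.audio.mel_spectrogram follows the usual
# HiFi-GAN/BigVGAN convention: a waveform batch [B, T] maps to roughly
# [B, num_mels, T // hop_size], e.g.
#   to_mel(torch.randn(1, sr).float())  # -> about [1, 80, sr // 256]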
# Load F0 conditioned model
dit_checkpoint_path_f0, dit_config_path_f0 = load_custom_model_from_hf(
"Plachta/Seed-VC",
"DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth",
"config_dit_mel_seed_uvit_whisper_base_f0_44k.yml"
)
with open(dit_config_path_f0, 'r') as f:
    config_f0 = yaml.safe_load(f)
model_params_f0 = recursive_munch(config_f0['model_params'])
model_f0 = build_model(model_params_f0, stage='DiT')
hop_length_f0 = config_f0['preprocess_params']['spect_params']['hop_length']
sr_f0 = config_f0['preprocess_params']['sr']
# Load F0 model checkpoints
model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path_f0, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model_f0:
model_f0[key] = model_f0[key].eval().to(device).half()
print("[INFO] | F0 conditioned DiT model loaded and set to eval mode.")
model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
# Load F0 extractor
model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
rmvpe = RMVPE(model_path, is_half=True, device=device) # Ensure RMVPE supports half precision
print("[INFO] | RMVPE model loaded and converted to float16.")
mel_fn_args_f0 = {
"n_fft": config_f0['preprocess_params']['spect_params']['n_fft'],
"win_size": config_f0['preprocess_params']['spect_params']['win_length'],
"hop_size": config_f0['preprocess_params']['spect_params']['hop_length'],
"num_mels": 80, # Ensure this matches the primary model
"sampling_rate": sr_f0,
"fmin": 0,
"fmax": None,
"center": False
}
to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)
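# Same helper as to_mel, but parameterized from the 44.1 kHz F0 config, so the
# two pipelines run at different frame rates (hop/window sizes come from the
# config rather than being hardcoded).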
# Load BigVGAN 44kHz model
bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
bigvgan_44k_model.remove_weight_norm()
bigvgan_44k_model = bigvgan_44k_model.eval().to(device).half()
print("[INFO] | BigVGAN 44kHz model loaded, weight norm removed, set to eval mode, and converted to float16.")
# CSS Styling
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
# ----------------------------
# Functions
# ----------------------------
@torch.no_grad()
@torch.inference_mode()
def voice_conversion(input, reference, steps, guidance, pitch, speed):
    inference_module, mel_fn, bigvgan_fn = model, to_mel, bigvgan_model
    sampling_rate, sr_current, hop_length_current = 16000, 22050, 256
    # Roughly 30 s of mel frames fit in the DiT context window
    max_context_window = sr_current // hop_length_current * 30
    # Load input and reference audio at the model sample rate
    source_audio, _ = librosa.load(input, sr=sr_current)
    ref_audio, _ = librosa.load(reference, sr=sr_current)
    source_audio_tensor = torch.tensor(source_audio, dtype=torch.float32).unsqueeze(0).to(device)
    ref_audio_tensor = torch.tensor(ref_audio, dtype=torch.float32).unsqueeze(0).to(device)
    # Whisper expects 16 kHz input, so resample both signals for feature extraction
    source_16k = librosa.resample(source_audio, orig_sr=sr_current, target_sr=sampling_rate)
    ref_16k = librosa.resample(ref_audio, orig_sr=sr_current, target_sr=sampling_rate)
    # Generate Whisper content features (encoder only; one encoder frame spans 320 samples at 16 kHz)
    def whisper_features(wave_16k):
        inputs = whisper_feature_extractor(
            [wave_16k],
            return_tensors="pt",
            return_attention_mask=True,
            sampling_rate=sampling_rate
        )
        input_features = whisper_model._mask_input_features(
            inputs.input_features.to(torch.float16),
            attention_mask=inputs.attention_mask
        ).to(device)
        outputs = whisper_model.encoder(input_features).last_hidden_state.to(torch.float16)
        # Trim the 30 s padding Whisper always adds back down to the true audio length
        return outputs[:, :wave_16k.shape[-1] // 320 + 1]
    alt_outputs = whisper_features(source_16k)  # content features of the source
    ori_outputs = whisper_features(ref_16k)     # content features of the reference
    # Generate mel spectrograms (in float32, since CPU STFT lacks a float16 kernel)
    mel = mel_fn(source_audio_tensor.float()).to(torch.float16)
    mel2 = mel_fn(ref_audio_tensor.float()).to(torch.float16)
    # Extract the speaker style embedding from mean-normalized 16 kHz fbanks
    feat2 = torchaudio.compliance.kaldi.fbank(
        torch.from_numpy(ref_16k).unsqueeze(0), num_mel_bins=80, dither=0, sample_frequency=sampling_rate
    )
    feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
    style2 = campplus_model(feat2.unsqueeze(0).to(torch.float16))
    # Length regulation: speed > 1 shortens the output, speed < 1 stretches it;
    # the prompt condition is built from the reference's content features, mirroring the source path
    target_lengths = torch.LongTensor([int(mel.size(2) / speed)]).to(mel.device)
    target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
    cond, _, _, _, _ = inference_module.length_regulator(
        alt_outputs, ylens=target_lengths, n_quantizers=3, f0=None
    )
    prompt_condition, _, _, _, _ = inference_module.length_regulator(
        ori_outputs, ylens=target2_lengths, n_quantizers=3, f0=None
    )
    # Inference and waveform generation, chunked so prompt + chunk fits the context window
    max_source_window = max_context_window - mel2.size(2)
    processed_frames = 0
    generated_wave_chunks = []
    while processed_frames < cond.size(1):
        chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
        cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1).to(torch.float16)
        vc_target = inference_module.cfm.inference(
            cat_condition,
            torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
            mel2, style2, None, steps, inference_cfg_rate=guidance
        )
        # Drop the reconstructed prompt region; keep only the converted frames
        vc_target = vc_target[:, :, mel2.size(2):]
        vc_wave = bigvgan_fn(vc_target.to(torch.float16))[0]  # match the vocoder's float16 weights
        generated_wave_chunks.append(vc_wave.squeeze(0).cpu().numpy())
        processed_frames += chunk_cond.size(1)
    # Concatenate chunks, apply the pitch shift, and peak-normalize
    final_audio = np.concatenate(generated_wave_chunks).astype(np.float32)
    if pitch != 0:
        final_audio = librosa.effects.pitch_shift(final_audio, sr=sr_current, n_steps=pitch)
    peak = np.max(np.abs(final_audio))
    if peak > 0:
        final_audio = final_audio / peak
    # Save and return audio (soundfile has no float16 subtype, so write float32)
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
        sf.write(tmp_file.name, final_audio, sr_current, format='WAV')
        return tmp_file.name
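# Standalone usage sketch (hypothetical file paths):
#   out_path = voice_conversion("source.wav", "reference.wav",
#                               steps=4, guidance=0.7, pitch=0.0, speed=1.0)
#   print(out_path)  # temporary .wav containing the converted audio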
def cloud():
print("[CLOUD] | Space maintained.")
@spaces.GPU(duration=15)
def gpu():
return
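# On Hugging Face ZeroGPU Spaces, having a @spaces.GPU-decorated function is
# what registers the Space for GPU scheduling; this stub presumably exists only
# for that registration, since inference itself runs on CPU.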
# ----------------------------
# Gradio Interface
# ----------------------------
with gr.Blocks(css=css) as main:
with gr.Column():
gr.Markdown("🪄 Add tone to audio.")
with gr.Column():
input = gr.Audio(label="Input Audio", type="filepath")
reference_input = gr.Audio(label="Reference Audio", type="filepath")
with gr.Column():
steps = gr.Slider(label="Steps", value=4, minimum=1, maximum=100, step=1)
guidance = gr.Slider(label="Guidance", value=0.7, minimum=0.0, maximum=1.0, step=0.1)
pitch = gr.Slider(label="Pitch", value=0.0, minimum=-10.0, maximum=10.0, step=0.1)
speed = gr.Slider(label="Speed", value=1.0, minimum=0.1, maximum=10.0, step=0.1)
with gr.Column():
submit = gr.Button("▶")
maintain = gr.Button("☁️")
with gr.Column():
output = gr.Audio(label="Output", type="filepath")
submit.click(voice_conversion, inputs=[input, reference_input, steps, guidance, pitch, speed], outputs=output, queue=False)
maintain.click(cloud, inputs=[], outputs=[], queue=False)
main.launch(show_api=True)