import gradio as gr
import numpy as np
import soundfile as sf
import spaces
import torch
import torchaudio
import librosa
import yaml
import tempfile
import os

from huggingface_hub import hf_hub_download
from transformers import AutoFeatureExtractor, WhisperModel
from torch.nn.utils import parametrizations

from modules.commons import build_model, load_checkpoint, recursive_munch
from modules.campplus.DTDNN import CAMPPlus
from modules.bigvgan import bigvgan
from modules.rmvpe import RMVPE
from modules.audio import mel_spectrogram

# ----------------------------
# Optimization Settings
# ----------------------------

# Set the number of threads to the number of CPU cores
torch.set_num_threads(os.cpu_count())
torch.set_num_interop_threads(os.cpu_count())

# Enable optimized CPU backends and disable CUDA paths (CPU-only inference)
torch.backends.openmp.enabled = True
torch.backends.mkldnn.enabled = True
torch.backends.cudnn.enabled = False
torch.backends.cuda.enabled = False

torch.set_grad_enabled(False)

# Force CPU usage and set default dtype to float16
torch.set_default_dtype(torch.float16)
device = torch.device("cpu")
print(f"[DEVICE] | Using device: {device} with dtype {torch.get_default_dtype()}")

# ----------------------------
# Load Models and Configuration
# ----------------------------

def load_custom_model_from_hf(repo_id, model_filename="pytorch_model.bin", config_filename="config.yml"):
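    """Download a model checkpoint (and optionally its config) from the Hugging Face Hub into ./checkpoints and return the local path(s)."""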
    os.makedirs("./checkpoints", exist_ok=True)
    model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, cache_dir="./checkpoints")
    if config_filename is None:
        return model_path
    config_path = hf_hub_download(repo_id=repo_id, filename=config_filename, cache_dir="./checkpoints")

    return model_path, config_path
    
# Load DiT model
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf(
    "Plachta/Seed-VC", 
    "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth", 
    "config_dit_mel_seed_uvit_whisper_small_wavenet.yml"
)
with open(dit_config_path, 'r') as f:
    config = yaml.safe_load(f)
model_params = recursive_munch(config['model_params'])
model = build_model(model_params, stage='DiT')

# Debug: Print model keys to identify correct key
print(f"[INFO] | Model keys: {model.keys()}")

hop_length = config['preprocess_params']['spect_params']['hop_length']
sr = config['preprocess_params']['sr']

# Load DiT checkpoints
model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model:
    model[key] = model[key].eval().to(device).half()
print("[INFO] | DiT model loaded, set to eval mode, and converted to float16.")

# Pre-allocate the DiT estimator's inference caches (batch size 1, up to 8192 frames)
model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)

# Ensure 'CAMPPlus' is correctly imported and defined
try:
    campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
    print("[INFO] | CAMPPlus model instantiated.")
except NameError:
    print("[ERROR] | CAMPPlus is not defined. Please check the import path and ensure CAMPPlus is correctly defined.")
    raise

campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
campplus_state = torch.load(campplus_ckpt_path, map_location="cpu")
campplus_model.load_state_dict(campplus_state)
campplus_model = campplus_model.eval().to(device).half()
print("[INFO] | CAMPPlus model loaded, set to eval mode, and converted to float16.")

# Load BigVGAN model
bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
bigvgan_model.remove_weight_norm()
bigvgan_model = bigvgan_model.eval().to(device).half()
print("[INFO] | BigVGAN model loaded, weight norm removed, set to eval mode, and converted to float16.")

# Load FAcodec model
ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
with open(config_path, 'r') as f:
    codec_config = yaml.safe_load(f)
codec_model_params = recursive_munch(codec_config['model_params'])
codec_encoder = build_model(codec_model_params, stage="codec")
ckpt_params = torch.load(ckpt_path, map_location="cpu")
for key in codec_encoder:
    codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
codec_encoder = {k: v.eval().to(device).half() for k, v in codec_encoder.items()}
print("[INFO] | FAcodec model loaded, set to eval mode, and converted to float16.")

# Load Whisper model with float16 and compatible size
whisper_name = getattr(model_params.speech_tokenizer, 'whisper_name', "openai/whisper-small")
whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float16).to(device)
del whisper_model.decoder  # Remove decoder as it's not used
whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)
print(f"[INFO] | Whisper model '{whisper_name}' loaded with dtype {whisper_model.dtype} and moved to CPU.")

# Generate mel spectrograms with optimized parameters
mel_fn_args = {
    "n_fft": 1024,
    "win_size": 1024,
    "hop_size": 256,
    "num_mels": 80,
    "sampling_rate": sr,
    "fmin": 0,
    "fmax": None,
    "center": False
}
to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
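# to_mel maps a (batch, samples) waveform tensor to a (batch, 80, frames) mel spectrogram
# with hop 256 at 22.05 kHz (assumed shapes, based on the parameters above).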

# Load F0 conditioned model
dit_checkpoint_path_f0, dit_config_path_f0 = load_custom_model_from_hf(
    "Plachta/Seed-VC", 
    "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth", 
    "config_dit_mel_seed_uvit_whisper_base_f0_44k.yml"
)
with open(dit_config_path_f0, 'r') as f:
    config_f0 = yaml.safe_load(f)
model_params_f0 = recursive_munch(config_f0['model_params'])
model_f0 = build_model(model_params_f0, stage='DiT')

hop_length_f0 = config_f0['preprocess_params']['spect_params']['hop_length']
sr_f0 = config_f0['preprocess_params']['sr']

# Load F0 model checkpoints
model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path_f0, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model_f0:
    model_f0[key] = model_f0[key].eval().to(device).half()
print("[INFO] | F0 conditioned DiT model loaded and set to eval mode.")

model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)

# Load F0 extractor
model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
rmvpe = RMVPE(model_path, is_half=True, device=device)  # Ensure RMVPE supports half precision
print("[INFO] | RMVPE model loaded and converted to float16.")

mel_fn_args_f0 = {
    "n_fft": config_f0['preprocess_params']['spect_params']['n_fft'],
    "win_size": config_f0['preprocess_params']['spect_params']['win_length'],
    "hop_size": config_f0['preprocess_params']['spect_params']['hop_length'],
    "num_mels": 80,  # Ensure this matches the primary model
    "sampling_rate": sr_f0,
    "fmin": 0,
    "fmax": None,
    "center": False
}
to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)

# Load BigVGAN 44kHz model
bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
bigvgan_44k_model.remove_weight_norm()
bigvgan_44k_model = bigvgan_44k_model.eval().to(device).half()
print("[INFO] | BigVGAN 44kHz model loaded, weight norm removed, set to eval mode, and converted to float16.")

# CSS Styling
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

# ----------------------------
# Functions
# ----------------------------

@torch.no_grad()
@torch.inference_mode()
def voice_conversion(input, reference, steps, guidance, pitch, speed):
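    """
    Convert the voice in `input` to the timbre of the `reference` audio.

    Pipeline: Whisper encoder content features -> length regulation ->
    DiT/CFM inference conditioned on the reference mel and CAMPPlus style
    embedding -> BigVGAN vocoder -> optional pitch shift -> peak-normalized WAV.
    Returns the path of a temporary WAV file.
    """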
    inference_module, mel_fn, bigvgan_fn = model, to_mel, bigvgan_model
    bitrate, sampling_rate, sr_current, hop_length_current = "320k", 16000, 22050, 256
    max_context_window, overlap_wave_len = sr_current // hop_length_current * 30, 16 * hop_length_current

    # Load and process input audio
    source_audio, _ = librosa.load(input, sr=sr_current)
    ref_audio, _ = librosa.load(reference, sr=sr_current)
    source_audio_tensor = torch.tensor(source_audio, dtype=torch.float16).unsqueeze(0).to(device)
    ref_audio_tensor = torch.tensor(ref_audio, dtype=torch.float16).unsqueeze(0).to(device)

    # Generate Whisper content features (Whisper and CAMPPlus expect 16 kHz input,
    # so resample the 22.05 kHz waveforms first)
    source_audio_16k = librosa.resample(source_audio, orig_sr=sr_current, target_sr=sampling_rate)
    ref_audio_16k = librosa.resample(ref_audio, orig_sr=sr_current, target_sr=sampling_rate)
    alt_inputs = whisper_feature_extractor(
        [source_audio_16k],
        return_tensors="pt",
        return_attention_mask=True,
        sampling_rate=sampling_rate
    )
    alt_input_features = whisper_model._mask_input_features(
        alt_inputs.input_features.to(torch.float16),
        attention_mask=alt_inputs.attention_mask
    ).to(device)
    alt_outputs = whisper_model.encoder(alt_input_features).last_hidden_state.to(torch.float16)
    # Trim padded frames: the feature extractor pads to 30 s, and the encoder emits
    # one frame per 320 input samples
    alt_outputs = alt_outputs[:, :source_audio_16k.shape[-1] // 320 + 1]

    # Generate mel spectrograms
    mel = mel_fn(source_audio_tensor)
    mel2 = mel_fn(ref_audio_tensor)

    # Extract the speaker style embedding with CAMPPlus from the 16 kHz reference
    ref_audio_16k_tensor = torch.tensor(ref_audio_16k, dtype=torch.float16).unsqueeze(0).to(device)
    feat2 = torchaudio.compliance.kaldi.fbank(
        ref_audio_16k_tensor, num_mel_bins=80, dither=0, sample_frequency=sampling_rate
    )
    style2 = campplus_model(feat2.unsqueeze(0).to(torch.float16))

    # Content features for the reference audio (assumed to be Whisper encoder
    # features, mirroring the source path, as the prompt input to the length regulator)
    ori_inputs = whisper_feature_extractor(
        [ref_audio_16k],
        return_tensors="pt",
        return_attention_mask=True,
        sampling_rate=sampling_rate
    )
    ori_input_features = whisper_model._mask_input_features(
        ori_inputs.input_features.to(torch.float16),
        attention_mask=ori_inputs.attention_mask
    ).to(device)
    ori_outputs = whisper_model.encoder(ori_input_features).last_hidden_state.to(torch.float16)
    ori_outputs = ori_outputs[:, :ref_audio_16k.shape[-1] // 320 + 1]

    # Target frame counts: the speed slider is assumed to scale duration
    # (speed > 1 shortens the output); the reference length is kept unchanged
    target_lengths = torch.LongTensor([int(mel.size(2) / speed)]).to(device)
    target2_lengths = torch.LongTensor([mel2.size(2)]).to(device)

    # Length regulation
    cond, _, _, _, _ = inference_module.length_regulator(
        alt_outputs, ylens=target_lengths, n_quantizers=3, f0=None
    )
    prompt_condition, _, _, _, _ = inference_module.length_regulator(
        ori_outputs, ylens=target2_lengths, n_quantizers=3, f0=None
    )

    # Inference and waveform generation, processed in sliding windows over the
    # source condition so long inputs fit the model's context
    max_source_window = max_context_window - mel2.size(2)
    processed_frames = 0
    generated_wave_chunks = []
    while processed_frames < cond.size(1):
        chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
        cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1).to(torch.float16)

        vc_target = inference_module.cfm.inference(
            cat_condition,
            torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
            mel2, style2, None, steps, inference_cfg_rate=guidance
        )
        # Drop the prompt frames so only the converted chunk is vocoded
        vc_target = vc_target[:, :, mel2.size(2):]

        vc_wave = bigvgan_fn(vc_target.float())[0].to(torch.float16)
        generated_wave_chunks.append(vc_wave.squeeze(0).cpu().numpy())

        # Advance by the consumed chunk length (no cross-fade between chunks here)
        processed_frames += chunk_cond.size(1)

    # Concatenate chunks, apply the requested pitch shift, and peak-normalize
    # (kept in float32, which soundfile accepts for WAV output)
    final_audio = np.concatenate(generated_wave_chunks).astype(np.float32)
    if pitch != 0.0:
        final_audio = librosa.effects.pitch_shift(final_audio, sr=sr_current, n_steps=pitch)
    peak = np.max(np.abs(final_audio))
    if peak > 0:
        final_audio = final_audio / peak

    # Save and return audio
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
        sf.write(tmp_file.name, final_audio, sr_current, format='WAV')
        return tmp_file.name
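
# A minimal usage sketch (hypothetical file paths), mirroring what the Gradio
# submit button below does:
#
#   out_path = voice_conversion("source.wav", "reference.wav",
#                               steps=4, guidance=0.7, pitch=0.0, speed=1.0)
#   print(out_path)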
    
def cloud():
    print("[CLOUD] | Space maintained.")

# Placeholder endpoint: the @spaces.GPU decorator requests a short GPU slot on
# ZeroGPU Spaces, while the conversion pipeline itself runs on CPU.
@spaces.GPU(duration=15)
def gpu():
    return

# ----------------------------
# Gradio Interface
# ----------------------------

with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Convert the tone of an audio clip to match a reference voice.")

    with gr.Column():
        input = gr.Audio(label="Input Audio", type="filepath")
        reference_input = gr.Audio(label="Reference Audio", type="filepath")
        
    with gr.Column():
        steps = gr.Slider(label="Steps", value=4, minimum=1, maximum=100, step=1)
        guidance = gr.Slider(label="Guidance", value=0.7, minimum=0.0, maximum=1.0, step=0.1)
        pitch = gr.Slider(label="Pitch", value=0.0, minimum=-10.0, maximum=10.0, step=0.1)
        speed = gr.Slider(label="Speed", value=1.0, minimum=0.1, maximum=10.0, step=0.1)

    with gr.Column():
        submit = gr.Button("▶")
        maintain = gr.Button("☁️")
        
    with gr.Column():
        output = gr.Audio(label="Output", type="filepath")

    submit.click(voice_conversion, inputs=[input, reference_input, steps, guidance, pitch, speed], outputs=output, queue=False)
    maintain.click(cloud, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)