import torch
import torchaudio
import librosa
import yaml
import numpy as np
from modules.commons import build_model, load_checkpoint, recursive_munch
from hf_utils import load_custom_model_from_hf
# Load models and configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the DiT configuration and checkpoint
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf(
    "Plachta/Seed-VC",
    "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
    "config_dit_mel_seed_uvit_whisper_small_wavenet.yml"
)
config = yaml.safe_load(open(dit_config_path, 'r'))
model_params = recursive_munch(config['model_params'])
model = build_model(model_params, stage='DiT')
hop_length = config['preprocess_params']['spect_params']['hop_length']
sr = config['preprocess_params']['sr']

# Load the model checkpoint
model, _, _, _ = load_checkpoint(
    model, None, dit_checkpoint_path,
    load_only_params=True, ignore_modules=[], is_distributed=False
)
for key in model:
    model[key].eval()
    model[key].to(device)
model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
# Load the CAMPPlus speaker embedding model
from modules.campplus.DTDNN import CAMPPlus
campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
campplus_model.load_state_dict(torch.load(campplus_ckpt_path, map_location="cpu"))
campplus_model.eval()
campplus_model.to(device)
# Load the BigVGAN vocoder
from modules.bigvgan import bigvgan
bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
bigvgan_model.remove_weight_norm()
bigvgan_model = bigvgan_model.eval().to(device)
# Load the FAcodec model
ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
codec_config = yaml.safe_load(open(config_path))
codec_model_params = recursive_munch(codec_config['model_params'])
codec_encoder = build_model(codec_model_params, stage="codec")
ckpt_params = torch.load(ckpt_path, map_location="cpu")
for key in codec_encoder:
    codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
_ = [codec_encoder[key].eval() for key in codec_encoder]
_ = [codec_encoder[key].to(device) for key in codec_encoder]
# Load the Whisper model
from transformers import AutoFeatureExtractor, WhisperModel
whisper_name = model_params.speech_tokenizer.whisper_name if hasattr(model_params.speech_tokenizer, 'whisper_name') else "openai/whisper-small"
whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float16).to(device)
del whisper_model.decoder
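# Only the encoder's hidden states are used as content features further below,
# so the decoder is deleted above to free GPU memory.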
whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)

# Mel-spectrogram helper
mel_fn_args = {
    "n_fft": config['preprocess_params']['spect_params']['n_fft'],
    "win_size": config['preprocess_params']['spect_params']['win_length'],
    "hop_size": config['preprocess_params']['spect_params']['hop_length'],
    "num_mels": config['preprocess_params']['spect_params']['n_mels'],
    "sampling_rate": sr,
    "fmin": 0,
    "fmax": None,
    "center": False
}
from modules.audio import mel_spectrogram
to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
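# Shape note: to_mel maps a (batch, num_samples) waveform tensor to a
# (batch, num_mels, num_frames) mel spectrogram; with center=False, num_frames
# is roughly num_samples // hop_size.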
# F0-conditioned model (44.1 kHz)
dit_checkpoint_path_f0, dit_config_path_f0 = load_custom_model_from_hf(
    "Plachta/Seed-VC",
    "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth",
    "config_dit_mel_seed_uvit_whisper_base_f0_44k.yml"
)
config_f0 = yaml.safe_load(open(dit_config_path_f0, 'r'))
model_params_f0 = recursive_munch(config_f0['model_params'])
model_f0 = build_model(model_params_f0, stage='DiT')
hop_length_f0 = config_f0['preprocess_params']['spect_params']['hop_length']
sr_f0 = config_f0['preprocess_params']['sr']

# Load the F0-conditioned model checkpoint
model_f0, _, _, _ = load_checkpoint(
    model_f0, None, dit_checkpoint_path_f0,
    load_only_params=True, ignore_modules=[], is_distributed=False
)
for key in model_f0:
    model_f0[key].eval()
    model_f0[key].to(device)
model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
# Load the RMVPE F0 extractor
from modules.rmvpe import RMVPE
model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
rmvpe = RMVPE(model_path, is_half=False, device=device)

# Mel-spectrogram parameters for the F0-conditioned model
mel_fn_args_f0 = {
    "n_fft": config_f0['preprocess_params']['spect_params']['n_fft'],
    "win_size": config_f0['preprocess_params']['spect_params']['win_length'],
    "hop_size": config_f0['preprocess_params']['spect_params']['hop_length'],
    "num_mels": config_f0['preprocess_params']['spect_params']['n_mels'],
    "sampling_rate": sr_f0,
    "fmin": 0,
    "fmax": None,
    "center": False
}
to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)
# Load the 44 kHz BigVGAN vocoder
bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
bigvgan_44k_model.remove_weight_norm()
bigvgan_44k_model = bigvgan_44k_model.eval().to(device)

def adjust_f0_semitones(f0_sequence, n_semitones):
    factor = 2 ** (n_semitones / 12)
    return f0_sequence * factor
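# Worked example: +12 semitones is one octave, which doubles the frequency, so
# adjust_f0_semitones(440.0, 12) -> 880.0 and adjust_f0_semitones(440.0, -12) -> 220.0.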
def crossfade(chunk1, chunk2, overlap):
    fade_out = np.cos(np.linspace(0, np.pi / 2, overlap)) ** 2
    fade_in = np.cos(np.linspace(np.pi / 2, 0, overlap)) ** 2
    chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
    return chunk2
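# The two ramps are cos^2 and sin^2 of the same argument, so fade_in + fade_out == 1
# at every sample: amplitude stays constant across the overlap region.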
# Streaming / chunking parameters
max_context_window = sr // hop_length * 30
overlap_frame_len = 16
overlap_wave_len = overlap_frame_len * hop_length
bitrate = "320k"
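# max_context_window is ~30 s worth of mel frames (sr // hop_length frames per
# second); overlap_wave_len is the matching overlap in samples consumed by
# crossfade() above when generated chunks are stitched together.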
@torch.no_grad()  # inference only: disable autograd throughout
def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift):
    """
    Perform voice conversion.

    Args:
        source (str): Path to the source audio file.
        target (str): Path to the target (reference) audio file whose voice to convert to.
        diffusion_steps (int): Number of diffusion steps.
        length_adjust (float): Length-adjustment factor.
        inference_cfg_rate (float): Classifier-free guidance (CFG) rate at inference time.
        f0_condition (bool): Whether to use the F0-conditioned model.
        auto_f0_adjust (bool): Whether to automatically match the source F0 to the reference.
        pitch_shift (int): Pitch shift in semitones.

    Returns:
        tuple: (sample rate, NumPy array of audio samples)
    """
    # Select the plain or F0-conditioned pipeline
    inference_module = model_f0 if f0_condition else model
    mel_fn = to_mel_f0 if f0_condition else to_mel
    bigvgan_fn = bigvgan_44k_model if f0_condition else bigvgan_model
    sr_used = sr_f0 if f0_condition else sr

    # Load audio
    source_audio, _ = librosa.load(source, sr=sr_used)
    ref_audio, _ = librosa.load(target, sr=sr_used)

    # Limit the reference audio to 25 seconds
    ref_audio = ref_audio[:sr_used * 25]

    # Convert audio to tensors
    source_audio = torch.tensor(source_audio).unsqueeze(0).float().to(device)
    ref_audio = torch.tensor(ref_audio).unsqueeze(0).float().to(device)

    # Resample to 16 kHz for Whisper
    ref_waves_16k = torchaudio.functional.resample(ref_audio, sr_used, 16000)
    converted_waves_16k = torchaudio.functional.resample(source_audio, sr_used, 16000)
    # Extract content features with Whisper
    if converted_waves_16k.size(-1) <= 16000 * 30:
        alt_inputs = whisper_feature_extractor([converted_waves_16k.squeeze(0).cpu().numpy()],
                                               return_tensors="pt",
                                               return_attention_mask=True,
                                               sampling_rate=16000)
        alt_input_features = whisper_model._mask_input_features(
            alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
        alt_outputs = whisper_model.encoder(
            alt_input_features.to(whisper_model.encoder.dtype),
            head_mask=None,
            output_attentions=False,
            output_hidden_states=False,
            return_dict=True,
        )
        S_alt = alt_outputs.last_hidden_state.to(torch.float32)
        # The Whisper encoder emits one frame per 320 input samples (50 frames/s)
        S_alt = S_alt[:, :converted_waves_16k.size(-1) // 320 + 1]
    else:
        # Process long audio in overlapping 30-second chunks
        overlapping_time = 5  # seconds
        S_alt_list = []
        buffer = None
        traversed_time = 0
        while traversed_time < converted_waves_16k.size(-1):
            if buffer is None:
                chunk = converted_waves_16k[:, traversed_time:traversed_time + 16000 * 30]
            else:
                chunk = torch.cat([buffer, converted_waves_16k[:, traversed_time:traversed_time + 16000 * (30 - overlapping_time)]], dim=-1)
            alt_inputs = whisper_feature_extractor([chunk.squeeze(0).cpu().numpy()],
                                                   return_tensors="pt",
                                                   return_attention_mask=True,
                                                   sampling_rate=16000)
            alt_input_features = whisper_model._mask_input_features(
                alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
            alt_outputs = whisper_model.encoder(
                alt_input_features.to(whisper_model.encoder.dtype),
                head_mask=None,
                output_attentions=False,
                output_hidden_states=False,
                return_dict=True,
            )
            S_alt = alt_outputs.last_hidden_state.to(torch.float32)
            S_alt = S_alt[:, :chunk.size(-1) // 320 + 1]
            if traversed_time == 0:
                S_alt_list.append(S_alt)
            else:
                # Trim the overlap region (the encoder runs at 50 frames/s)
                S_alt_list.append(S_alt[:, 50 * overlapping_time:])
            buffer = chunk[:, -16000 * overlapping_time:]
            traversed_time += 30 * 16000 if traversed_time == 0 else chunk.size(-1) - 16000 * overlapping_time
        S_alt = torch.cat(S_alt_list, dim=1)
    # Extract features from the reference audio
    ori_waves_16k = torchaudio.functional.resample(ref_audio, sr_used, 16000)
    ori_inputs = whisper_feature_extractor([ori_waves_16k.squeeze(0).cpu().numpy()],
                                           return_tensors="pt",
                                           return_attention_mask=True,
                                           sampling_rate=16000)
    ori_input_features = whisper_model._mask_input_features(
        ori_inputs.input_features, attention_mask=ori_inputs.attention_mask).to(device)
    with torch.no_grad():
        ori_outputs = whisper_model.encoder(
            ori_input_features.to(whisper_model.encoder.dtype),
            head_mask=None,
            output_attentions=False,
            output_hidden_states=False,
            return_dict=True,
        )
    S_ori = ori_outputs.last_hidden_state.to(torch.float32)
    S_ori = S_ori[:, :ori_waves_16k.size(-1) // 320 + 1]

    mel = mel_fn(source_audio.to(device).float())
    mel2 = mel_fn(ref_audio.to(device).float())
    target_lengths = torch.LongTensor([int(mel.size(2) * length_adjust)]).to(mel.device)
    target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
    # Extract the speaker style embedding with CAMPPlus
    feat2 = torchaudio.compliance.kaldi.fbank(ref_waves_16k,
                                              num_mel_bins=80,
                                              dither=0,
                                              sample_frequency=16000)
    feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
    style2 = campplus_model(feat2.unsqueeze(0))
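    # style2 is a (1, 192) speaker embedding (embedding_size=192 at load time) that
    # conditions the diffusion model on the target speaker's timbre.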
    if f0_condition:
        # Extract F0 with RMVPE
        F0_ori = rmvpe.infer_from_audio(ref_waves_16k[0], thred=0.5)
        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.5)
        F0_ori = torch.from_numpy(F0_ori).to(device)[None]
        F0_alt = torch.from_numpy(F0_alt).to(device)[None]
        voiced_F0_ori = F0_ori[F0_ori > 1]
        voiced_F0_alt = F0_alt[F0_alt > 1]
        log_f0_alt = torch.log(F0_alt + 1e-5)
        voiced_log_f0_ori = torch.log(voiced_F0_ori + 1e-5)
        voiced_log_f0_alt = torch.log(voiced_F0_alt + 1e-5)
        median_log_f0_ori = torch.median(voiced_log_f0_ori)
        median_log_f0_alt = torch.median(voiced_log_f0_alt)

        # Shift the source F0 so its median matches the reference's
        shifted_log_f0_alt = log_f0_alt.clone()
        if auto_f0_adjust:
            shifted_log_f0_alt[F0_alt > 1] = log_f0_alt[F0_alt > 1] - median_log_f0_alt + median_log_f0_ori
        shifted_f0_alt = torch.exp(shifted_log_f0_alt)
        if pitch_shift != 0:
            shifted_f0_alt[F0_alt > 1] = adjust_f0_semitones(shifted_f0_alt[F0_alt > 1], pitch_shift)
    else:
        F0_ori = None
        F0_alt = None
        shifted_f0_alt = None
    # Length regulation
    cond, _, codes, commitment_loss, codebook_loss = inference_module.length_regulator(
        S_alt, ylens=target_lengths, n_quantizers=3, f0=shifted_f0_alt
    )
    prompt_condition, _, codes, commitment_loss, codebook_loss = inference_module.length_regulator(
        S_ori, ylens=target2_lengths, n_quantizers=3, f0=F0_ori
    )

    max_source_window = max_context_window - mel2.size(2)
    processed_frames = 0
    generated_wave_chunks = []
    # Generate audio chunk by chunk
    while processed_frames < cond.size(1):
        chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
        is_last_chunk = processed_frames + max_source_window >= cond.size(1)
        cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1)
        vc_target = inference_module.cfm.inference(
            cat_condition,
            torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
            mel2, style2, None, diffusion_steps,
            inference_cfg_rate=inference_cfg_rate
        )
        # Discard the prompt portion of the generated mel
        vc_target = vc_target[:, :, mel2.size(-1):]
        vc_wave = bigvgan_fn(vc_target)[0]
        if processed_frames == 0:
            if is_last_chunk:
                output_wave = vc_wave[0].cpu().numpy()
                generated_wave_chunks.append(output_wave)
                break
            output_wave = vc_wave[0, :-overlap_wave_len].cpu().numpy()
            generated_wave_chunks.append(output_wave)
            previous_chunk = vc_wave[0, -overlap_wave_len:]
            processed_frames += vc_target.size(2) - overlap_frame_len
        elif is_last_chunk:
            output_wave = crossfade(previous_chunk.cpu().numpy(), vc_wave[0].cpu().numpy(), overlap_wave_len)
            generated_wave_chunks.append(output_wave)
            processed_frames += vc_target.size(2) - overlap_frame_len
            break
        else:
            output_wave = crossfade(
                previous_chunk.cpu().numpy(),
                vc_wave[0, :-overlap_wave_len].cpu().numpy(),
                overlap_wave_len
            )
            generated_wave_chunks.append(output_wave)
            previous_chunk = vc_wave[0, -overlap_wave_len:]
            processed_frames += vc_target.size(2) - overlap_frame_len
    # Concatenate all chunks into one waveform
    full_output_wave = np.concatenate(generated_wave_chunks)

    # Normalize only if the signal would clip
    max_val = np.max(np.abs(full_output_wave))
    if max_val > 1.0:
        full_output_wave = full_output_wave / max_val

    return sr_used, full_output_wave
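# Example invocation: a minimal sketch with illustrative parameter values. The file
# paths are placeholders, and the soundfile package is an assumed extra dependency
# (it is not imported by the script above).
if __name__ == "__main__":
    import soundfile as sf

    out_sr, out_wave = voice_conversion(
        source="source.wav",
        target="reference.wav",
        diffusion_steps=25,
        length_adjust=1.0,
        inference_cfg_rate=0.7,
        f0_condition=False,
        auto_f0_adjust=True,
        pitch_shift=0,
    )
    sf.write("converted.wav", out_wave, out_sr)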