""" | |
This file contains the Predictor class, which is used to run predictions on the | |
Whisper model. It is based on the Predictor class from the original Whisper | |
repository, with some modifications to make it work with the RP platform. | |
""" | |
from concurrent.futures import ThreadPoolExecutor
import io
import os
import random
import shutil
import subprocess
import time
import uuid
from glob import glob

import boto3
from botocore.exceptions import NoCredentialsError
import numpy as np
import yaml
from munch import Munch
from openai import OpenAI
import phonemizer
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
import librosa
import nltk
from nltk.tokenize import word_tokenize
import soundfile as sf
from pydantic import BaseModel, HttpUrl

from runpod.serverless.utils import rp_cuda
import se_extractor
from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule
from models import *
from utils import *
from tortoise.utils.text import split_and_recombine_text
from resemble_enhance.enhancer.inference import denoise, enhance
from text_utils import TextCleaner
from api import BaseSpeakerTTS, ToneColorConverter

# Seed everything for reproducible generation.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

nltk.download('punkt')
class Predictor:
    def __init__(self):
        self.model = None
        self.sampler = None
        self.to_mel = None
        self.global_phonemizer = None
        self.model_params = None
        self.textcleaner = None
        self.mean = 0
        self.std = 0
        self.device = 'cuda:0'
        self.ckpt_base = 'checkpoints/base_speakers/EN'
        self.ckpt_converter = 'checkpoints/converter'
        self.base_speaker_tts = None
        self.tone_color_converter = None
        self.output_dir = 'outputs'
        self.processed_dir = 'processed'
        os.makedirs(self.processed_dir, exist_ok=True)
        os.makedirs(self.output_dir, exist_ok=True)
        self.s3_client = boto3.client(
            's3',
            aws_access_key_id=os.getenv('AWS_ACCESS_KEY'),
            aws_secret_access_key=os.getenv('AWS_SECRET_KEY'),
        )
        # Never print raw credentials; log only whether they are set.
        print("AWS credentials configured:",
              bool(os.getenv('AWS_ACCESS_KEY')) and bool(os.getenv('AWS_SECRET_KEY')))
    def setup(self):
        """Load the phonemizer, StyleTTS2 networks, and diffusion sampler."""
        self.global_phonemizer = phonemizer.backend.EspeakBackend(
            language='en-us', preserve_punctuation=True, with_stress=True)
        self.textcleaner = TextCleaner()
        self.to_mel = torchaudio.transforms.MelSpectrogram(
            n_mels=80, n_fft=2048, win_length=1200, hop_length=300)
        self.mean, self.std = -4, 4

        with open("Configs/hg.yml") as f:
            config = yaml.safe_load(f)
        print(config)

        ASR_config = config.get('ASR_config', False)
        ASR_path = config.get('ASR_path', False)
        text_aligner = load_ASR_models(ASR_path, ASR_config)

        F0_path = config.get('F0_path', False)
        pitch_extractor = load_F0_models(F0_path)

        from Utils.PLBERT.util import load_plbert
        BERT_path = config.get('PLBERT_dir', False)
        plbert = load_plbert(BERT_path)

        self.model_params = recursive_munch(config['model_params'])
        self.model = build_model(self.model_params, text_aligner, pitch_extractor, plbert)
        _ = [self.model[key].eval() for key in self.model]
        _ = [self.model[key].to(self.device) for key in self.model]

        params_whole = torch.load("Models/epochs_2nd_00020.pth", map_location='cpu')
        params = params_whole['net']
        for key in self.model:
            if key in params:
                print('%s loaded' % key)
                try:
                    self.model[key].load_state_dict(params[key])
                except Exception:
                    # The checkpoint was saved from a DataParallel model; strip
                    # the `module.` prefix from each parameter name and retry.
                    from collections import OrderedDict
                    state_dict = params[key]
                    new_state_dict = OrderedDict()
                    for k, v in state_dict.items():
                        name = k[7:]  # remove `module.`
                        new_state_dict[name] = v
                    self.model[key].load_state_dict(new_state_dict, strict=False)
        _ = [self.model[key].eval() for key in self.model]

        self.sampler = DiffusionSampler(
            self.model.diffusion.diffusion,
            sampler=ADPM2Sampler(),
            sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0),  # empirical parameters
            clamp=False,
        )
    def predict(self, s3_url, passage, method_type='voice_clone'):
        """Download a reference clip from S3, clone its voice, and upload the result."""
        output_dir = 'processed'
        gen_id = str(uuid.uuid4())
        os.makedirs(output_dir, exist_ok=True)
        raw_dir = os.path.join(output_dir, gen_id, 'raw')
        segments_dir = os.path.join(output_dir, gen_id, 'segments')
        results_dir = os.path.join(output_dir, gen_id, 'results')
        openvoice_dir = os.path.join(output_dir, gen_id, 'openvoice')  # reserved for the OpenVoice path; unused here
        os.makedirs(raw_dir)
        os.makedirs(segments_dir)
        os.makedirs(results_dir)

        s3_key = s3_url.split('/')[-1]
        bucket_name = 'demovidelyuseruploads'
        local_file_path = os.path.join(raw_dir, s3_key)
        self.download_file_from_s3(self.s3_client, bucket_name, s3_key, local_file_path)
        se_extractor.generate_voice_segments(local_file_path, segments_dir, vad=True)

        if method_type == 'voice_clone':
            # Voice clone with StyleTTS2.
            model, sampler = self.model, self.sampler
            processed_seg_dir = os.path.join(segments_dir, s3_key.split('.')[0], 'wavs')
            result = self.process_audio_file(processed_seg_dir, passage, model, sampler)
            final_output = os.path.join(results_dir, f"{gen_id}-voice-clone-1.wav")
            sf.write(final_output, result, 24000)
            # Swap only the extension, not every 'wav' substring in the path.
            mp3_final_output_1 = os.path.splitext(final_output)[0] + '.mp3'
            self.convert_wav_to_mp3(final_output, mp3_final_output_1)
            print(mp3_final_output_1)
            self.upload_file_to_s3(mp3_final_output_1, 'demovidelyusergenerations',
                                   f"{gen_id}-voice-clone-1.mp3")
            return {"voice_clone_1": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-clone-1.mp3"}
    def _fn(self, path, solver, nfe, tau):
        """Enhance a recording with resemble-enhance."""
        if path is None:
            return None, None
        solver = solver.lower()
        nfe = int(nfe)
        lambd = 0.9
        dwav, sr = torchaudio.load(path)
        dwav = dwav.mean(dim=0)  # downmix to mono
        wav1, new_sr = enhance(dwav, sr, self.device, nfe=nfe, solver=solver, lambd=lambd, tau=tau)
        wav1 = wav1.cpu().numpy()
        return (new_sr, wav1)
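    # Hypothetical usage sketch for _fn (and _fn_denoise below); the file name
    # and parameter values are illustrative, not from the original code. Both
    # helpers return a (sample_rate, numpy_waveform) pair that soundfile can
    # write directly:
    #   sr, wav = predictor._fn("clip.wav", solver="Midpoint", nfe=64, tau=0.5)
    #   sf.write("clip_enhanced.wav", wav, sr)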
    def _fn_denoise(self, path, solver, nfe, tau):
        """Denoise a recording with resemble-enhance."""
        if path is None:
            return None
        print(torch.cuda.is_available())
        print("Going to denoise")
        # solver/nfe/lambd are normalized here, but denoise() does not accept them.
        solver = solver.lower()
        nfe = int(nfe)
        lambd = 0.9
        dwav, sr = torchaudio.load(path)
        dwav = dwav.mean(dim=0)  # downmix to mono
        wav1, new_sr = denoise(dwav, sr, self.device)
        wav1 = wav1.cpu().numpy()
        print("Done denoising")
        return (new_sr, wav1)
    def LFinference(self, model, sampler, text, s_prev, ref_s,
                    alpha=0.3, beta=0.7, t=0.7, diffusion_steps=5, embedding_scale=1):
        """Long-form inference: synthesize one sentence, blending its predicted
        style with the previous sentence's style (s_prev) for smooth transitions."""
        text = text.strip()
        ps = self.global_phonemizer.phonemize([text])
        ps = word_tokenize(ps[0])
        ps = ' '.join(ps)
        ps = ps.replace('``', '"')
        ps = ps.replace("''", '"')

        tokens = self.textcleaner(ps)
        tokens.insert(0, 0)
        tokens = torch.LongTensor(tokens).to(self.device).unsqueeze(0)

        with torch.no_grad():
            input_lengths = torch.LongTensor([tokens.shape[-1]]).to(self.device)
            text_mask = self.length_to_mask(input_lengths).to(self.device)

            t_en = model.text_encoder(tokens, input_lengths, text_mask)
            bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
            d_en = model.bert_encoder(bert_dur).transpose(-1, -2)

            s_pred = sampler(noise=torch.randn((1, 256)).unsqueeze(1).to(self.device),
                             embedding=bert_dur,
                             embedding_scale=embedding_scale,
                             features=ref_s,  # reference from the same speaker as the embedding
                             num_steps=diffusion_steps).squeeze(1)

            if s_prev is not None:
                # convex combination of previous and current style
                s_pred = t * s_prev + (1 - t) * s_pred

            # First 128 dims are the acoustic style, last 128 the prosodic style.
            s = s_pred[:, 128:]
            ref = s_pred[:, :128]

            ref = alpha * ref + (1 - alpha) * ref_s[:, :128]
            s = beta * s + (1 - beta) * ref_s[:, 128:]

            s_pred = torch.cat([ref, s], dim=-1)

            d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)

            x, _ = model.predictor.lstm(d)
            duration = model.predictor.duration_proj(x)

            duration = torch.sigmoid(duration).sum(axis=-1)
            pred_dur = torch.round(duration.squeeze()).clamp(min=1)

            # Build the monotonic phoneme-to-frame alignment from the durations.
            pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))
            c_frame = 0
            for i in range(pred_aln_trg.size(0)):
                pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1
                c_frame += int(pred_dur[i].data)

            # encode prosody
            en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(self.device))
            if self.model_params.decoder.type == "hifigan":
                asr_new = torch.zeros_like(en)
                asr_new[:, :, 0] = en[:, :, 0]
                asr_new[:, :, 1:] = en[:, :, 0:-1]
                en = asr_new

            F0_pred, N_pred = model.predictor.F0Ntrain(en, s)

            asr = (t_en @ pred_aln_trg.unsqueeze(0).to(self.device))
            if self.model_params.decoder.type == "hifigan":
                asr_new = torch.zeros_like(asr)
                asr_new[:, :, 0] = asr[:, :, 0]
                asr_new[:, :, 1:] = asr[:, :, 0:-1]
                asr = asr_new

            out = model.decoder(asr, F0_pred, N_pred, ref.squeeze().unsqueeze(0))

        # Drop the final 100 samples (trailing artifact) and return the style
        # vector so the caller can pass it back in as s_prev.
        return out.squeeze().cpu().numpy()[..., :-100], s_pred
    def length_to_mask(self, lengths):
        mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
        mask = torch.gt(mask + 1, lengths.unsqueeze(1))
        return mask
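    # Example (length_to_mask): lengths = torch.LongTensor([3, 5]) yields a
    # (2, 5) mask whose True entries mark padding positions past each length:
    #   [[False, False, False,  True,  True],
    #    [False, False, False, False, False]]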
    def preprocess(self, wave):
        wave_tensor = torch.from_numpy(wave).float()
        mel_tensor = self.to_mel(wave_tensor)
        # Log-mel spectrogram, normalized with the fixed statistics (mean=-4, std=4).
        mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - self.mean) / self.std
        return mel_tensor
    def compute_style(self, path, model):
        """Compute the 256-dim reference style vector (acoustic || prosodic) for a clip."""
        wave, sr = librosa.load(path, sr=24000)
        audio, index = librosa.effects.trim(wave, top_db=30)
        if sr != 24000:
            # librosa >= 0.10 requires keyword arguments for resample.
            audio = librosa.resample(audio, orig_sr=sr, target_sr=24000)
        mel_tensor = self.preprocess(audio).to(self.device)

        with torch.no_grad():
            ref_s = model.style_encoder(mel_tensor.unsqueeze(1))
            ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))

        return torch.cat([ref_s, ref_p], dim=1)
    def process_audio_file(self, file_dir, passage, model, sampler):
        """Synthesize the passage sentence-by-sentence in the voice of the first segment."""
        print(file_dir)
        audio_segs = glob(f'{file_dir}/*.wav')
        print(audio_segs)
        if len(audio_segs) >= 1:
            s_ref = self.compute_style(audio_segs[0], model)
        else:
            raise ValueError('No audio segments found!')

        sentences = split_and_recombine_text(passage)
        wavs = []
        s_prev = None
        for text in sentences:
            if text.strip() == "":
                continue
            text += '.'
            wav, s_prev = self.LFinference(model, sampler, text,
                                           s_prev,
                                           s_ref,
                                           alpha=0,
                                           beta=0.3,  # make it more suitable for the text
                                           t=0.7,
                                           diffusion_steps=10, embedding_scale=1)
            wavs.append(wav)
        return np.concatenate(wavs)
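    # Hypothetical example (paths are illustrative, not from the original
    # code): given reference segments under
    # processed/<gen_id>/segments/<clip_name>/wavs/*.wav,
    #   audio = predictor.process_audio_file(
    #       "processed/<gen_id>/segments/clip/wavs",
    #       "Hello there. How are you today?",
    #       predictor.model, predictor.sampler)
    # returns a single float32 waveform at 24 kHz.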
    def download_file_from_s3(self, s3_client, bucket_name, s3_key, local_file_path):
        try:
            s3_client.download_file(bucket_name, s3_key, local_file_path)
            print(f"File downloaded successfully: {local_file_path}")
        except Exception as e:
            print(f"Error downloading file: {e}")
    def convert_wav_to_mp3(self, wav_file, mp3_file):
        command = ['ffmpeg', '-i', wav_file, '-q:a', '0', '-map', 'a', mp3_file]
        # check=True surfaces ffmpeg failures instead of silently continuing.
        subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    def upload_file_to_s3(self, file_name, bucket, object_name=None, content_type="audio/mpeg"):
        if object_name is None:
            object_name = file_name
        try:
            with open(file_name, 'rb') as file_data:
                self.s3_client.put_object(Bucket=bucket, Key=object_name,
                                          Body=file_data, ContentType=content_type)
            print("File uploaded successfully")
            return True
        except NoCredentialsError:
            print("Error: No AWS credentials found")
            return False
        except Exception as e:
            print(f"Error uploading file: {e}")
            return False
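
# Minimal usage sketch, assuming AWS_ACCESS_KEY / AWS_SECRET_KEY are set, all
# checkpoints are in place, and an uploaded clip exists in the
# 'demovidelyuseruploads' bucket. The object name below is illustrative, not
# from the original code.
if __name__ == "__main__":
    predictor = Predictor()
    predictor.setup()  # loads StyleTTS2, PL-BERT, and the diffusion sampler
    result = predictor.predict(
        s3_url="https://demovidelyuseruploads.s3.amazonaws.com/sample.wav",
        passage="Hello there. This is a short voice cloning test.",
    )
    print(result)  # {"voice_clone_1": "https://demovidelyusergenerations.s3.amazonaws.com/<id>-voice-clone-1.mp3"}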