""" | |
This file contains the Predictor class, which is used to run predictions on the | |
Whisper model. It is based on the Predictor class from the original Whisper | |
repository, with some modifications to make it work with the RP platform. | |
""" | |
import base64
import io
import os
import random
import shutil
import subprocess
import uuid
from collections import OrderedDict

import numpy as np
import torch
import torchaudio
import librosa
import soundfile as sf
import yaml
import boto3
from botocore.exceptions import NoCredentialsError
import nltk
from nltk.tokenize import word_tokenize
import phonemizer
from openai import OpenAI

import se_extractor
from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule
from models import *
from utils import *
from tortoise.utils.text import split_and_recombine_text
from resemble_enhance.enhancer.inference import denoise, enhance
from text_utils import TextCleaner
from api import BaseSpeakerTTS, ToneColorConverter

# Seed everything for reproducible generation.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

nltk.download('punkt')
class Predictor:
    def __init__(self):
        self.model = None
        self.sampler = None
        self.to_mel = None
        self.global_phonemizer = None
        self.model_params = None
        self.textcleaner = None
        self.mean = 0
        self.std = 0
        self.device = 'cuda'
        self.ckpt_base = 'checkpoints/base_speakers/EN'
        self.ckpt_converter = 'checkpoints/converter'
        self.base_speaker_tts = None
        self.tone_color_converter = None
        self.output_dir = 'outputs'
        self.processed_dir = 'processed'
        os.makedirs(self.processed_dir, exist_ok=True)
        os.makedirs(self.output_dir, exist_ok=True)
        self.s3_client = boto3.client('s3',
                                      aws_access_key_id=os.getenv('AWS_ACCESS_KEY'),
                                      aws_secret_access_key=os.getenv('AWS_SECRET_KEY'))
        # Never print the credentials themselves; only warn when they are missing.
        if not (os.getenv('AWS_ACCESS_KEY') and os.getenv('AWS_SECRET_KEY')):
            print("Warning: AWS_ACCESS_KEY / AWS_SECRET_KEY are not set")
    def setup(self):
        """Load all models onto self.device. Must complete once before any predict call."""
        self.global_phonemizer = phonemizer.backend.EspeakBackend(
            language='en-us', preserve_punctuation=True, with_stress=True)
        self.textcleaner = TextCleaner()
        self.to_mel = torchaudio.transforms.MelSpectrogram(
            n_mels=80, n_fft=2048, win_length=1200, hop_length=300)
        self.mean, self.std = -4, 4
        with open("Configs/hg.yml") as f:
            config = yaml.safe_load(f)
        print(config)
        ASR_config = config.get('ASR_config', False)
        ASR_path = config.get('ASR_path', False)
        text_aligner = load_ASR_models(ASR_path, ASR_config)
        F0_path = config.get('F0_path', False)
        pitch_extractor = load_F0_models(F0_path)
        from Utils.PLBERT.util import load_plbert
        BERT_path = config.get('PLBERT_dir', False)
        plbert = load_plbert(BERT_path)
        self.model_params = recursive_munch(config['model_params'])
        self.model = build_model(self.model_params, text_aligner, pitch_extractor, plbert)
        _ = [self.model[key].eval() for key in self.model]
        _ = [self.model[key].to(self.device) for key in self.model]
        params_whole = torch.load("Models/epochs_2nd_00020.pth", map_location='cpu')
        params = params_whole['net']
        for key in self.model:
            if key in params:
                print('%s loaded' % key)
                try:
                    self.model[key].load_state_dict(params[key])
                except Exception:
                    # Checkpoint was saved from a DataParallel model; strip the
                    # 'module.' prefix from every key before loading.
                    state_dict = params[key]
                    new_state_dict = OrderedDict()
                    for k, v in state_dict.items():
                        name = k[7:]  # remove 'module.'
                        new_state_dict[name] = v
                    self.model[key].load_state_dict(new_state_dict, strict=False)
        _ = [self.model[key].eval() for key in self.model]
        self.sampler = DiffusionSampler(
            self.model.diffusion.diffusion,
            sampler=ADPM2Sampler(),
            sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0),  # empirical parameters
            clamp=False
        )
        self.base_speaker_tts = BaseSpeakerTTS(f'{self.ckpt_base}/config.json', device=self.device)
        self.base_speaker_tts.load_ckpt(f'{self.ckpt_base}/checkpoint.pth')
        self.tone_color_converter = ToneColorConverter(f'{self.ckpt_converter}/config.json', device=self.device)
        self.tone_color_converter.load_ckpt(f'{self.ckpt_converter}/checkpoint.pth')
    def createvoice(self, audio_base_64, cut_audio, process_audio):
        file_bytes = base64.b64decode(audio_base_64)
        file_buffer = io.BytesIO(file_bytes)
        header = file_buffer.read(12)
        print(header)
        bucket_name = 'demovidelyuseruploads'
        # Sniff the container from the first bytes: a RIFF/WAVE marker for wav,
        # an MPEG frame sync (0xFFEx/0xFFFx) or an ID3v2 tag for mp3.
        if b'WAVE' in header:
            file_format = 'wav'
        elif header.startswith((b'\xff\xfb', b'\xff\xf3', b'\xff\xe3', b'\xff\xfa', b'ID3')):
            file_format = 'mp3'
        else:
            file_format = 'unknown'
        if file_format == 'unknown':
            return {'error': 'unrecognized file format, encode audio file as base64 str'}
        unique_filename = f"{uuid.uuid4()}"
        local_filename = f"{unique_filename}.{file_format}"
        with open(local_filename, 'wb') as file_out:
            file_out.write(file_bytes)
        wav_filename = local_filename
        if file_format == "mp3":
            wav_filename = f"{unique_filename}.wav"
            # -y: overwrite without prompting, so ffmpeg can never block the worker
            subprocess.run(["ffmpeg", "-y", "-i", local_filename, wav_filename])
            os.remove(local_filename)
        print(wav_filename)
        # A positive cut_audio means the caller asked for the clip to be trimmed.
        if cut_audio > 0:
            se_extractor.extract_segments_to_cut_audio(cut_audio, wav_filename)
        file_url = f"https://{bucket_name}.s3.amazonaws.com/{wav_filename}"
        if process_audio:
            (new_sr, wav1) = self._fn(wav_filename, "Midpoint", 32, 0.5)
            print('Denoised')
            buffer = io.BytesIO()
            sf.write(buffer, wav1, new_sr, format='WAV')
            print(new_sr)
            buffer.seek(0)
        else:
            wav1, sr = librosa.load(wav_filename, sr=None)
            buffer = io.BytesIO()
            sf.write(buffer, wav1, sr, format='WAV')
            buffer.seek(0)
        print("uploading")
        content_type = "audio/wav"
        try:
            self.s3_client.put_object(Bucket=bucket_name, Key=wav_filename, Body=buffer, ContentType=content_type)
            print("uploaded")
        except Exception as e:
            print(f"Error uploading to S3: {e}")
            return {"error": str(e)}
        os.remove(wav_filename)
        return {"url": file_url}
    def predict(self, s3_url, passage, process_audio, method_type='voice_clone'):
        # method_type is introduced here so the emotion and multi-language
        # branches below are reachable (in the original they sat after an
        # unconditional return and referenced an undefined name). The default
        # 'voice_clone' preserves the original behaviour.
        output_dir = 'processed'
        gen_id = str(uuid.uuid4())
        os.makedirs(output_dir, exist_ok=True)
        raw_dir = os.path.join(output_dir, gen_id, 'raw')
        segments_dir = os.path.join(output_dir, gen_id, 'segments')
        results_dir = os.path.join(output_dir, gen_id, 'results')
        openvoice_dir = os.path.join(output_dir, gen_id, 'openvoice')
        os.makedirs(raw_dir)
        os.makedirs(segments_dir)
        os.makedirs(results_dir)
        s3_key = s3_url.split('/')[-1]
        bucket_name = 'demovidelyuseruploads'
        local_file_path = os.path.join(raw_dir, s3_key)
        self.download_file_from_s3(self.s3_client, bucket_name, s3_key, local_file_path)
        if method_type == 'voice_clone':
            # Voice clone 1: StyleTTS2 long-form inference.
            model, sampler = self.model, self.sampler
            result = self.process_audio_file(local_file_path, passage, model, sampler)
            final_output = os.path.join(results_dir, f"{gen_id}-voice-clone-1.wav")
            sf.write(final_output, result, 24000)
            if process_audio:
                (new_sr, wav1) = self._fn(final_output, "Midpoint", 32, 0.5)
                sf.write(final_output, wav1, new_sr)
            # Voice clone 2: OpenVoice base TTS plus tone-color conversion.
            base_speaker_tts, tone_color_converter = self.base_speaker_tts, self.tone_color_converter
            reference_speaker = local_file_path
            target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir=openvoice_dir, vad=False)
            src_path = os.path.join(results_dir, f"{gen_id}-tmp.wav")
            openvoice_output = os.path.join(results_dir, f"{gen_id}-voice-clone-2.wav")
            base_speaker_tts.tts(passage, src_path, speaker='default', language='English', speed=1.0)
            source_se = torch.load(f'{self.ckpt_base}/en_default_se.pth').to(self.device)
            tone_color_converter.convert(audio_src_path=src_path, src_se=source_se, tgt_se=target_se, output_path=openvoice_output, message='')
            if process_audio:
                (new_sr, wav1) = self._fn(openvoice_output, "Midpoint", 32, 0.5)
                sf.write(openvoice_output, wav1, new_sr)
            mp3_final_output_1 = os.path.splitext(final_output)[0] + '.mp3'
            mp3_final_output_2 = os.path.splitext(openvoice_output)[0] + '.mp3'
            self.convert_wav_to_mp3(final_output, mp3_final_output_1)
            self.convert_wav_to_mp3(openvoice_output, mp3_final_output_2)
            print(mp3_final_output_1)
            print(mp3_final_output_2)
            self.upload_file_to_s3(mp3_final_output_1, 'demovidelyusergenerations', f"{gen_id}-voice-clone-1.mp3")
            self.upload_file_to_s3(mp3_final_output_2, 'demovidelyusergenerations', f"{gen_id}-voice-clone-2.mp3")
            shutil.rmtree(os.path.join(output_dir, gen_id))
            return {"voice_clone_1": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-clone-1.mp3",
                    "voice_clone_2": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-clone-2.mp3"
                    }
        if method_type == 'voice_clone_with_emotions':
            try:
                print("INSIDE emotions")
                base_speaker_tts, tone_color_converter = self.base_speaker_tts, self.tone_color_converter
                reference_speaker = local_file_path
                target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir=openvoice_dir, vad=False)
                src_path = os.path.join(results_dir, f"{gen_id}-tmp-emotions.wav")
                openvoice_output = os.path.join(results_dir, f"{gen_id}-4.wav")
                base_speaker_tts.tts(passage, src_path, speaker='default', language='English', speed=1.0, use_emotions=True)
                source_se = torch.load(f'{self.ckpt_base}/en_style_se.pth').to(self.device)
                tone_color_converter.convert(audio_src_path=src_path, src_se=source_se, tgt_se=target_se, output_path=openvoice_output, message='')
                if process_audio:
                    (new_sr, wav1) = self._fn(openvoice_output, "Midpoint", 32, 0.5)
                    sf.write(openvoice_output, wav1, new_sr)
                mp3_final_output_1 = os.path.splitext(openvoice_output)[0] + '.mp3'
                self.convert_wav_to_mp3(openvoice_output, mp3_final_output_1)
                print(mp3_final_output_1)
                self.upload_file_to_s3(mp3_final_output_1, 'demovidelyusergenerations', f"{gen_id}-voice-with-emotions.mp3")
                shutil.rmtree(os.path.join(output_dir, gen_id))
                return {"voice_clone_with_emotions": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-with-emotions.mp3"
                        }
            except Exception as e:
                return {"error": f"Unexpected error: {e}"}
        if method_type == 'voice_clone_with_multi_lang':
            print("Inside multilang")
            # Voice clone with multi-lingual text: OpenAI TTS renders the passage,
            # then the tone-color converter maps it onto the reference voice.
            _, tone_color_converter = self.base_speaker_tts, self.tone_color_converter
            reference_speaker = local_file_path
            target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir=openvoice_dir, vad=False)
            src_path = 'openai_source_output.mp3'
            client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
            response = client.audio.speech.create(
                model="tts-1",
                voice="fable",
                input=passage
            )
            openai_multi_lang_path = os.path.join(results_dir, f"{gen_id}-openai-gen.wav")
            response.stream_to_file(openai_multi_lang_path)
            multi_lang_with_voice_clone_path = os.path.join(results_dir, f"{gen_id}-voice-clone-multi-lang.wav")
            source_se, audio_name = se_extractor.get_se(src_path, tone_color_converter, vad=True)
            self.tone_color_converter.convert(audio_src_path=openai_multi_lang_path, src_se=source_se, tgt_se=target_se, output_path=multi_lang_with_voice_clone_path, message='')
            mp3_final_output_1 = os.path.splitext(multi_lang_with_voice_clone_path)[0] + '.mp3'
            self.convert_wav_to_mp3(multi_lang_with_voice_clone_path, mp3_final_output_1)
            print(mp3_final_output_1)
            self.upload_file_to_s3(mp3_final_output_1, 'demovidelyusergenerations', f"{gen_id}-voice-clone-multi-lang.mp3")
            shutil.rmtree(os.path.join(output_dir, gen_id))
            return {"voice_clone_with_multi_lang": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-clone-multi-lang.mp3"
                    }
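    # Example calls (illustrative; method_type is the parameter added above,
    # not part of the original signature):
    #   predictor.predict(s3_url, passage, process_audio=True)
    #   predictor.predict(s3_url, passage, False, method_type='voice_clone_with_multi_lang')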
    def predict_with_emotions(self, s3_url, passage, process_audio):
        output_dir = 'processed'
        gen_id = str(uuid.uuid4())
        os.makedirs(output_dir, exist_ok=True)
        raw_dir = os.path.join(output_dir, gen_id, 'raw')
        segments_dir = os.path.join(output_dir, gen_id, 'segments')
        results_dir = os.path.join(output_dir, gen_id, 'results')
        openvoice_dir = os.path.join(output_dir, gen_id, 'openvoice')
        os.makedirs(raw_dir)
        os.makedirs(segments_dir)
        os.makedirs(results_dir)
        s3_key = s3_url.split('/')[-1]
        bucket_name = 'demovidelyuseruploads'
        local_file_path = os.path.join(raw_dir, s3_key)
        self.download_file_from_s3(self.s3_client, bucket_name, s3_key, local_file_path)
        try:
            print("INSIDE new emotions method")
            base_speaker_tts, tone_color_converter = self.base_speaker_tts, self.tone_color_converter
            reference_speaker = local_file_path
            target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir=openvoice_dir, vad=False)
            src_path = os.path.join(results_dir, f"{gen_id}-tmp-emotions.wav")
            openvoice_output = os.path.join(results_dir, f"{gen_id}-4.wav")
            base_speaker_tts.tts(passage, src_path, speaker='default', language='English', speed=1.0, use_emotions=True)
            source_se = torch.load(f'{self.ckpt_base}/en_style_se.pth').to(self.device)
            tone_color_converter.convert(audio_src_path=src_path, src_se=source_se, tgt_se=target_se, output_path=openvoice_output, message='')
            if process_audio:
                (new_sr, wav1) = self._fn(openvoice_output, "Midpoint", 32, 0.5)
                sf.write(openvoice_output, wav1, new_sr)
            mp3_final_output_1 = os.path.splitext(openvoice_output)[0] + '.mp3'
            self.convert_wav_to_mp3(openvoice_output, mp3_final_output_1)
            print(mp3_final_output_1)
            self.upload_file_to_s3(mp3_final_output_1, 'demovidelyusergenerations', f"{gen_id}-voice-with-emotions.mp3")
            shutil.rmtree(os.path.join(output_dir, gen_id))
            return {"voice_clone_with_emotions": f"https://demovidelyusergenerations.s3.amazonaws.com/{gen_id}-voice-with-emotions.mp3"
                    }
        except Exception as e:
            return {"error": f"Unexpected error: {e}"}
    def _fn(self, path, solver, nfe, tau):
        if path is None:
            return None, None
        solver = solver.lower()
        nfe = int(nfe)
        lambd = 0.1  # light denoising strength (0 would disable denoising entirely)
        dwav, sr = torchaudio.load(path)
        dwav = dwav.mean(dim=0)  # downmix to mono
        wav1, new_sr = enhance(dwav, sr, self.device, nfe=nfe, solver=solver, lambd=lambd, tau=tau)
        wav1 = wav1.cpu().numpy()
        return (new_sr, wav1)
    def _fn_denoise(self, path, solver, nfe, tau):
        # solver and nfe are accepted for signature parity with _fn, but the
        # plain denoiser does not use them.
        if path is None:
            return None, None
        print(torch.cuda.is_available())
        print("Going to denoise")
        dwav, sr = torchaudio.load(path)
        dwav = dwav.mean(dim=0)  # downmix to mono
        wav1, new_sr = denoise(dwav, sr, self.device)
        wav1 = wav1.cpu().numpy()
        print("Done denoising")
        return (new_sr, wav1)
    def LFinference(self, model, sampler, text, s_prev, ref_s, alpha=0.3, beta=0.7, t=0.7, diffusion_steps=5, embedding_scale=1):
        text = text.strip()
        ps = self.global_phonemizer.phonemize([text])
        ps = word_tokenize(ps[0])
        ps = ' '.join(ps)
        ps = ps.replace('``', '"')
        ps = ps.replace("''", '"')
        tokens = self.textcleaner(ps)
        tokens.insert(0, 0)
        tokens = torch.LongTensor(tokens).to(self.device).unsqueeze(0)
        with torch.no_grad():
            input_lengths = torch.LongTensor([tokens.shape[-1]]).to(self.device)
            text_mask = self.length_to_mask(input_lengths).to(self.device)
            t_en = model.text_encoder(tokens, input_lengths, text_mask)
            bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
            d_en = model.bert_encoder(bert_dur).transpose(-1, -2)
            s_pred = sampler(noise=torch.randn((1, 256)).unsqueeze(1).to(self.device),
                             embedding=bert_dur,
                             embedding_scale=embedding_scale,
                             features=ref_s,  # reference from the same speaker as the embedding
                             num_steps=diffusion_steps).squeeze(1)
            if s_prev is not None:
                # convex combination of previous and current style
                s_pred = t * s_prev + (1 - t) * s_pred
            s = s_pred[:, 128:]
            ref = s_pred[:, :128]
            ref = alpha * ref + (1 - alpha) * ref_s[:, :128]
            s = beta * s + (1 - beta) * ref_s[:, 128:]
            s_pred = torch.cat([ref, s], dim=-1)
            d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)
            x, _ = model.predictor.lstm(d)
            duration = model.predictor.duration_proj(x)
            duration = torch.sigmoid(duration).sum(axis=-1)
            pred_dur = torch.round(duration.squeeze()).clamp(min=1)
            pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))
            c_frame = 0
            for i in range(pred_aln_trg.size(0)):
                pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1
                c_frame += int(pred_dur[i].data)
            # encode prosody
            en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(self.device))
            if self.model_params.decoder.type == "hifigan":
                asr_new = torch.zeros_like(en)
                asr_new[:, :, 0] = en[:, :, 0]
                asr_new[:, :, 1:] = en[:, :, 0:-1]
                en = asr_new
            F0_pred, N_pred = model.predictor.F0Ntrain(en, s)
            asr = (t_en @ pred_aln_trg.unsqueeze(0).to(self.device))
            if self.model_params.decoder.type == "hifigan":
                asr_new = torch.zeros_like(asr)
                asr_new[:, :, 0] = asr[:, :, 0]
                asr_new[:, :, 1:] = asr[:, :, 0:-1]
                asr = asr_new
            out = model.decoder(asr, F0_pred, N_pred, ref.squeeze().unsqueeze(0))
        return out.squeeze().cpu().numpy()[..., :-100], s_pred  # drop the last 100 samples to trim an end-of-utterance artifact
    def length_to_mask(self, lengths):
        mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
        mask = torch.gt(mask + 1, lengths.unsqueeze(1))
        return mask
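    # Example: length_to_mask(torch.LongTensor([2, 3])) ->
    #   tensor([[False, False,  True],
    #           [False, False, False]])
    # i.e. True marks padded positions beyond each sequence's length.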
    def preprocess(self, wave):
        wave_tensor = torch.from_numpy(wave).float()
        mel_tensor = self.to_mel(wave_tensor)
        mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - self.mean) / self.std
        return mel_tensor
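    # With mean=-4 and std=4, a near-silent frame (mel ~ 0, so log(1e-5) ~ -11.5)
    # normalizes to about (-11.5 + 4) / 4 ~ -1.9, keeping the log-mel input
    # roughly unit-scale for the style encoders.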
    def compute_style(self, path, model):
        # librosa.load(sr=24000) already resamples, so no extra resampling step is needed.
        wave, sr = librosa.load(path, sr=24000)
        audio, index = librosa.effects.trim(wave, top_db=30)
        mel_tensor = self.preprocess(audio).to(self.device)
        with torch.no_grad():
            ref_s = model.style_encoder(mel_tensor.unsqueeze(1))
            ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))
        return torch.cat([ref_s, ref_p], dim=1)
    def process_audio_file(self, local_file_path, passage, model, sampler):
        print(local_file_path)
        s_ref = self.compute_style(local_file_path, model)
        sentences = split_and_recombine_text(passage)
        wavs = []
        s_prev = None  # carry style across sentences for long-form consistency
        for text in sentences:
            if text.strip() == "":
                continue
            text += '.'
            wav, s_prev = self.LFinference(model, sampler, text,
                                           s_prev,
                                           s_ref,
                                           alpha=0,
                                           beta=0.3,
                                           t=0.7,
                                           diffusion_steps=10, embedding_scale=1)
            wavs.append(wav)
        return np.concatenate(wavs)
    def download_file_from_s3(self, s3_client, bucket_name, s3_key, local_file_path):
        try:
            s3_client.download_file(bucket_name, s3_key, local_file_path)
            print(f"File downloaded successfully: {local_file_path}")
        except Exception as e:
            print(f"Error downloading file: {e}")

    def convert_wav_to_mp3(self, wav_file, mp3_file):
        # -q:a 0 selects the highest VBR quality; -y prevents an overwrite prompt
        command = ['ffmpeg', '-y', '-i', wav_file, '-q:a', '0', '-map', 'a', mp3_file]
        subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    def upload_file_to_s3(self, file_name, bucket, object_name=None, content_type="audio/mpeg"):
        if object_name is None:
            object_name = file_name
        try:
            with open(file_name, 'rb') as file_data:
                self.s3_client.put_object(Bucket=bucket, Key=object_name, Body=file_data, ContentType=content_type)
            print("File uploaded successfully")
            return True
        except NoCredentialsError:
            print("Error: No AWS credentials found")
            return False
        except Exception as e:
            print(f"Error uploading file: {e}")
            return False
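
# --- Usage sketch (illustrative, not part of the deployed worker) ---
# A minimal example of how this Predictor would typically be wired into a
# RunPod serverless handler; the handler name and input keys below are
# assumptions for illustration, not the worker's actual contract.
#
#   import runpod
#
#   predictor = Predictor()
#   predictor.setup()
#
#   def handler(job):
#       inp = job["input"]
#       return predictor.predict(inp["s3_url"], inp["passage"], inp.get("process_audio", False))
#
#   runpod.serverless.start({"handler": handler})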