import os
import tempfile

import anyio
import edge_tts
import gradio as gr
import torch
import torchaudio
from openai import OpenAI
from scipy.io import wavfile
from scipy.io.wavfile import write

from tts_voice import tts_order_voice

# Load the KNN-VC voice-conversion model (prematched weights, CPU inference).
knn_vc = torch.hub.load(
    'bshall/knn-vc', 'knn_vc',
    prematched=True, trust_repo=True, pretrained=True, device='cpu',
)

# Mapping of display language/voice names to edge-tts voice identifiers.
language_dict = tts_order_voice


async def text_to_speech_edge(text, language_code):
    """Synthesize *text* with edge-tts using the voice mapped to *language_code*.

    Returns a ``(status_message, mp3_path)`` tuple matching the two Gradio
    output components.
    """
    voice = language_dict[language_code]
    communicate = edge_tts.Communicate(text, voice)
    # Only reserve a unique path here; edge-tts writes the file itself below.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name

    await communicate.save(tmp_path)

    return "语音合成完成:{}".format(text), tmp_path


def voice_change(audio_in, audio_ref):
    """Convert the voice in *audio_in* to sound like *audio_ref* with KNN-VC.

    Parameters
    ----------
    audio_in : str
        Path to the source (content) WAV file.
    audio_ref : str
        Path to the reference (target-voice) WAV file.

    Returns
    -------
    str
        Path of the converted 16 kHz output file (``'output.wav'``).
    """
    samplerate1, data1 = wavfile.read(audio_in)
    samplerate2, data2 = wavfile.read(audio_ref)

    # Re-write both inputs as plain WAV files for the KNN-VC feature extractor.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_audio_in, \
            tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_audio_ref:
        audio_in_path = tmp_audio_in.name
        audio_ref_path = tmp_audio_ref.name
        write(audio_in_path, samplerate1, data1)
        write(audio_ref_path, samplerate2, data2)

    try:
        query_seq = knn_vc.get_features(audio_in_path)
        matching_set = knn_vc.get_matching_set([audio_ref_path])
    finally:
        # The intermediate WAV copies are only needed for feature extraction;
        # remove them so repeated calls do not leak temp files (the originals
        # used delete=False and were never cleaned up).
        for path in (audio_in_path, audio_ref_path):
            try:
                os.remove(path)
            except OSError:
                pass

    out_wav = knn_vc.match(query_seq, matching_set, topk=4)

    # torchaudio.save expects a (channels, samples) tensor, so promote a
    # 1-D waveform to a single-channel 2-D tensor.
    if len(out_wav.shape) == 1:
        out_wav = out_wav.unsqueeze(0)

    output_path = 'output.wav'
    torchaudio.save(output_path, out_wav, 16000)
    return output_path


# Text-to-speech via the OpenAI-compatible endpoint
def tts(text, model, voice, api_key): if len(text) > 300: raise gr.Error('您输入的文本字符多于300个,请缩短您的文本') if api_key == '': raise gr.Error('请填写您的 OpenAI API Key') try: client = OpenAI(api_key=api_key, base_url='https://lmzh.top/v1') response = client.audio.speech.create( model=model, voice=voice, input=text, ) except Exception as error: raise gr.Error(f"生成语音时出错:{error}") with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file: temp_file.write(response.content) return temp_file.name # Gradio 前端设计 app = gr.Blocks() with app: gr.Markdown("#