zxsipola123456 committed
Commit 9653cf4
Parent(s):
a72f6c3
Create app.py
app.py
ADDED
@@ -0,0 +1,124 @@
import os
import tempfile

import anyio
import edge_tts
import gradio as gr
import torch
import torchaudio
from openai import OpenAI
from scipy.io import wavfile
from scipy.io.wavfile import write

from tts_voice import tts_order_voice

# Load the pretrained KNN-VC voice-conversion model from torch.hub (CPU inference).
knn_vc = torch.hub.load('bshall/knn-vc', 'knn_vc', prematched=True, trust_repo=True, pretrained=True, device='cpu')

# Mapping of language/voice display names to edge-tts voice identifiers.
language_dict = tts_order_voice


async def text_to_speech_edge(text, language_code):
    voice = language_dict[language_code]
    communicate = edge_tts.Communicate(text, voice)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name

    await communicate.save(tmp_path)

    return "Speech synthesis complete: {}".format(text), tmp_path


def voice_change(audio_in, audio_ref):
    # Re-save both inputs as local WAV files, then convert the input audio
    # to the timbre of the reference audio with KNN-VC.
    samplerate1, data1 = wavfile.read(audio_in)
    samplerate2, data2 = wavfile.read(audio_ref)
    write("./audio_in.wav", samplerate1, data1)
    write("./audio_ref.wav", samplerate2, data2)

    query_seq = knn_vc.get_features("./audio_in.wav")
    matching_set = knn_vc.get_matching_set(["./audio_ref.wav"])
    out_wav = knn_vc.match(query_seq, matching_set, topk=4)
    torchaudio.save('output.wav', out_wav[None], 16000)
    return 'output.wav'


def tts(text, model, voice, api_key, base_url='https://lmzh.top/v1'):
    if len(text) > 300:
        raise gr.Error('Your text is longer than 300 characters; please shorten it.')
    if api_key == '':
        raise gr.Error('Please enter your OpenAI API Key')
    else:
        try:
            client = OpenAI(api_key=api_key, base_url=base_url)

            response = client.audio.speech.create(
                model=model,  # "tts-1", "tts-1-hd"
                voice=voice,  # 'alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'
                input=text,
            )
        except Exception as error:
            # Log the exception, then surface a friendly message in the UI.
            print(str(error))
            raise gr.Error("An error occurred while generating speech. Please check your API key and try again.")

    # Save the audio to a temp file and return its path for the Audio component.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_file.write(response.content)
        temp_file_path = temp_file.name

    return temp_file_path


app = gr.Blocks()

with app:
    gr.Markdown("# <center>🌟 - OpenAI TTS + AI Voice Conversion</center>")
    gr.Markdown("### <center>🎶 State-of-the-art text-to-speech plus real-time AI voice conversion from a 3-second reference, with Chinese support! Powered by [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech) and [KNN-VC](https://github.com/bshall/knn-vc) </center>")
    with gr.Tab("🤗 OpenAI TTS"):
        with gr.Row(variant='panel'):
            api_key = gr.Textbox(type='password', label='OpenAI API Key', placeholder='Enter your OpenAI API Key here')
            model = gr.Dropdown(choices=['tts-1', 'tts-1-hd'], label='Model (tts-1 is faster, tts-1-hd has better audio quality)', value='tts-1')
            voice = gr.Dropdown(choices=['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'], label='Speaker', value='alloy')
        with gr.Row():
            with gr.Column():
                inp_text = gr.Textbox(label="Text to synthesize (Chinese or English)", placeholder="想说却还没说的 还很多 攒着是因为想写成歌", lines=5)
                btn_text = gr.Button("Generate speech", variant="primary")

            with gr.Column():
                inp1 = gr.Audio(type="filepath", label="OpenAI TTS output", interactive=False)
                inp2 = gr.Audio(type="filepath", label="Reference audio for voice conversion (sets the target timbre)")
                btn1 = gr.Button("Run AI voice conversion", variant="primary")
            with gr.Column():
                out1 = gr.Audio(type="filepath", label="Voice-converted audio")
        btn_text.click(tts, [inp_text, model, voice, api_key], inp1)
        btn1.click(voice_change, [inp1, inp2], out1)
    with gr.Tab("⚡ Edge TTS"):
        with gr.Row():
            input_text = gr.Textbox(lines=5, placeholder="想说却还没说的 还很多 攒着是因为想写成歌", label="Text to synthesize (Chinese or English)")
            default_language = list(language_dict.keys())[15]
            language = gr.Dropdown(choices=list(language_dict.keys()), value=default_language, label="Language of the text")
            btn_edge = gr.Button("Generate speech", variant="primary")
            output_text = gr.Textbox(label="Output text", visible=False)
            output_audio = gr.Audio(type="filepath", label="Edge TTS output")

        with gr.Row():
            inp_vc = gr.Audio(type="filepath", label="Reference audio for voice conversion (sets the target timbre)")
            btn_vc = gr.Button("Run AI voice conversion", variant="primary")
            out_vc = gr.Audio(type="filepath", label="Voice-converted audio")

        btn_edge.click(text_to_speech_edge, [input_text, language], [output_text, output_audio])
        btn_vc.click(voice_change, [output_audio, inp_vc], out_vc)

    gr.Markdown("### <center>Note❗: Do not generate content that could harm individuals or organizations; this program is for research, study, and personal entertainment only. Get your OpenAI API Key [here](https://platform.openai.com/api-keys).</center>")
    gr.HTML('''
    <div class="footer">
        <p>Powered by sipola</p>
    </div>
    ''')

app.launch(show_error=True)
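
Note: the app imports tts_order_voice from a local tts_voice module that is not part of this commit. Since language_dict[language_code] looks the dropdown selection up by key, the module is presumably a dict mapping display names to edge-tts voice identifiers, with at least 16 entries (the Edge TTS tab defaults to index 15). A minimal sketch under that assumption; the keys and the specific entries below are illustrative, not the actual module contents:

# tts_voice.py -- hypothetical sketch; the real module ships with the Space
# and is assumed to expose a dict named tts_order_voice.
tts_order_voice = {
    "English (US) - Jenny (female)": "en-US-JennyNeural",
    "English (US) - Guy (male)": "en-US-GuyNeural",
    "Chinese (Mandarin) - Xiaoxiao (female)": "zh-CN-XiaoxiaoNeural",
    "Chinese (Mandarin) - Yunxi (male)": "zh-CN-YunxiNeural",
    # ... more edge-tts voices; the real dict presumably has at least 16 entries,
    # since the UI uses index 15 as its default.
}

With a mapping of this shape, text_to_speech_edge resolves the selected dropdown key to a voice ID and passes it to edge_tts.Communicate.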