Hilley committed on
Commit 779412e
1 Parent(s): 332c793

Update app.py

Files changed (1)
  app.py +116 -30
app.py CHANGED
@@ -13,6 +13,11 @@ import se_extractor
from api import BaseSpeakerTTS, ToneColorConverter
import soundfile

+ from tts_voice import tts_order_voice
+ import edge_tts
+ import tempfile
+ import anyio
+
print("loading ChatTTS model...")
chat = ChatTTS.Chat()
chat.load_models()
@@ -122,47 +127,128 @@ def generate_audio(text, audio_ref, temperature, top_P, top_K, audio_seed_input,

    return "output.wav"

+ def vc_en(text, audio_ref, style_mode):
+     if style_mode=="default":
+         source_se = torch.load(f'{ckpt_base_en}/en_default_se.pth').to(device)
+         reference_speaker = audio_ref
+         target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)
+         save_path = "output.wav"
+
+         # Run the base speaker tts
+         src_path = "tmp.wav"
+         base_speaker_tts.tts(text, src_path, speaker='default', language='English', speed=1.0)
+
+         # Run the tone color converter
+         encode_message = "@MyShell"
+         tone_color_converter.convert(
+             audio_src_path=src_path,
+             src_se=source_se,
+             tgt_se=target_se,
+             output_path=save_path,
+             message=encode_message)
+
+     else:
+         source_se = torch.load(f'{ckpt_base_en}/en_style_se.pth').to(device)
+         reference_speaker = audio_ref
+         target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)
+
+         save_path = "output.wav"
+
+         # Run the base speaker tts
+         src_path = "tmp.wav"
+         base_speaker_tts.tts(text, src_path, speaker=style_mode, language='English', speed=0.9)
+
+         # Run the tone color converter
+         encode_message = "@MyShell"
+         tone_color_converter.convert(
+             audio_src_path=src_path,
+             src_se=source_se,
+             tgt_se=target_se,
+             output_path=save_path,
+             message=encode_message)
+
+     return "output.wav"
+
+ language_dict = tts_order_voice
+
+ base_speaker = "base_audio.mp3"
+ source_se, audio_name = se_extractor.get_se(base_speaker, tone_color_converter, vad=True)
+
+ async def text_to_speech_edge(text, audio_ref, language_code):
+     voice = language_dict[language_code]
+     communicate = edge_tts.Communicate(text, voice)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+         tmp_path = tmp_file.name
+
+     await communicate.save(tmp_path)
+
+     reference_speaker = audio_ref
+     target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)
+     save_path = "output.wav"
+
+     # Run the tone color converter
+     encode_message = "@MyShell"
+     tone_color_converter.convert(
+         audio_src_path=tmp_path,
+         src_se=source_se,
+         tgt_se=target_se,
+         output_path=save_path,
+         message=encode_message)
+
+     return "output.wav"

- with gr.Blocks() as demo:
-     gr.Markdown("# Enjoy chatting with your ai friends on website, telegram and so on! (https://linkin.love)")
-
-     default_text = "Today a man knocked on my door and asked for a small donation toward the local swimming pool. I gave him a glass of water."
-     text_input = gr.Textbox(label="Input Text", lines=4, placeholder="Please Input Text...", value=default_text)
-
-     default_refine_text = "[oral_2][laugh_0][break_6]"
-     refine_text_checkbox = gr.Checkbox(label="Refine text:'oral' means add filler words, 'laugh' means add laughter, and 'break' means add a pause. (0-10) ", value=True)
-     refine_text_input = gr.Textbox(label="Refine Prompt", lines=1, placeholder="Please Refine Prompt...", value=default_refine_text)
-     with gr.Column():
-         clone_voice = gr.Audio(label="请上传您喜欢的语音文件", type="filepath")
-
-     with gr.Row():
-         temperature_slider = gr.Slider(minimum=0.00001, maximum=1.0, step=0.00001, value=0.3, label="Audio temperature")
-         top_p_slider = gr.Slider(minimum=0.1, maximum=0.9, step=0.05, value=0.7, label="top_P")
-         top_k_slider = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_K")
-
-     with gr.Row():
-         audio_seed_input = gr.Number(value=42, label="Speaker Seed")
-         generate_audio_seed = gr.Button("\U0001F3B2")
-         text_seed_input = gr.Number(value=42, label="Text Seed")
-         generate_text_seed = gr.Button("\U0001F3B2")
-
-     generate_button = gr.Button("Generate")
-
-     #text_output = gr.Textbox(label="Refined Text", interactive=False)
-     audio_output = gr.Audio(label="Output Audio")
-
-     generate_audio_seed.click(generate_seed,
-                               inputs=[],
-                               outputs=audio_seed_input)
-
-     generate_text_seed.click(generate_seed,
-                              inputs=[],
-                              outputs=text_seed_input)
-
-     generate_button.click(generate_audio,
-                           inputs=[text_input, clone_voice, temperature_slider, top_p_slider, top_k_slider, audio_seed_input, text_seed_input, refine_text_checkbox, refine_text_input],
-                           outputs=audio_output)
+ with gr.Blocks() as demo:
+     gr.Markdown("# Enjoy chatting with your ai friends on website, telegram and so on! (https://linkin.love)")
+
+     default_text = "Today a man knocked on my door and asked for a small donation toward the local swimming pool. I gave him a glass of water."
+     text_input = gr.Textbox(label="Input Text", lines=4, placeholder="Please Input Text...", value=default_text)
+     voice_ref = gr.Audio(label="Reference Audio", info="Click on the ✎ button to upload your own target speaker audio", type="filepath", value="base_audio.mp3")
+
+     with gr.Tab("💕Super Natural"):
+         default_refine_text = "[oral_2][laugh_0][break_6]"
+         refine_text_checkbox = gr.Checkbox(label="Refine text", info="'oral' means add filler words, 'laugh' means add laughter, and 'break' means add a pause. (0-10) ", value=True)
+         refine_text_input = gr.Textbox(label="Refine Prompt", lines=1, placeholder="Please Refine Prompt...", value=default_refine_text)
+
+         with gr.Row():
+             temperature_slider = gr.Slider(minimum=0.00001, maximum=1.0, step=0.00001, value=0.3, label="Audio temperature")
+             top_p_slider = gr.Slider(minimum=0.1, maximum=0.9, step=0.05, value=0.7, label="top_P")
+             top_k_slider = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_K")
+
+         with gr.Row():
+             audio_seed_input = gr.Number(value=42, label="Speaker Seed")
+             generate_audio_seed = gr.Button("\U0001F3B2")
+             text_seed_input = gr.Number(value=42, label="Text Seed")
+             generate_text_seed = gr.Button("\U0001F3B2")
+
+         generate_button = gr.Button("Generate!")
+
+         #text_output = gr.Textbox(label="Refined Text", interactive=False)
+         audio_output = gr.Audio(label="Output Audio")
+
+         generate_audio_seed.click(generate_seed,
+                                   inputs=[],
+                                   outputs=audio_seed_input)
+
+         generate_text_seed.click(generate_seed,
+                                  inputs=[],
+                                  outputs=text_seed_input)
+
+         generate_button.click(generate_audio,
+                               inputs=[text_input, voice_ref, temperature_slider, top_p_slider, top_k_slider, audio_seed_input, text_seed_input, refine_text_checkbox, refine_text_input],
+                               outputs=audio_output)
+
+     with gr.Tab("💕Emotion Control"):
+         emo_pick = gr.Dropdown(label="Emotion", info="🙂default😊friendly🤫whispering😄cheerful😱terrified😡angry😢sad", choices=["default", "friendly", "whispering", "cheerful", "terrified", "angry", "sad"], value="default")
+         generate_button_emo = gr.Button("Generate!", variant="primary")
+         audio_emo = gr.Audio(label="Output Audio", type="filepath")
+         generate_button_emo.click(vc_en, [text_input, voice_ref, emo_pick], audio_emo)
+
+     with gr.Tab("💕multilingual"):
+         language = gr.Dropdown(choices=list(language_dict.keys()), value=list(language_dict.keys())[15], label="请选择文本对应的语言及说话人")
+         generate_button_ml = gr.Button("开始语音情感真实复刻吧!", variant="primary")
+         audio_ml = gr.Audio(label="为您合成的专属语音", type="filepath")
+         generate_button_ml.click(text_to_speech_edge, [text_input, voice_ref, language], audio_ml)

parser = argparse.ArgumentParser(description='ChatTTS demo Launch')
parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
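
Note: the new multilingual tab first synthesizes speech with edge-tts and only then runs the tone color converter over the temporary MP3. Below is a minimal sketch of that edge-tts step in isolation, assuming a hard-coded en-US-AriaNeural voice and an illustrative synthesize() helper; the commit itself resolves the voice from tts_order_voice instead.

import asyncio
import tempfile

import edge_tts

async def synthesize(text: str, voice: str = "en-US-AriaNeural") -> str:
    # Build the edge-tts request for the given text and voice.
    communicate = edge_tts.Communicate(text, voice)
    # Reserve a temporary file path; edge-tts writes the audio when save() is awaited.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
    await communicate.save(tmp_path)
    return tmp_path

if __name__ == "__main__":
    print(asyncio.run(synthesize("Hello from edge-tts.")))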
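
Note: text_to_speech_edge is declared with async def and passed directly to generate_button_ml.click(...); Gradio accepts coroutine functions as event handlers and awaits them itself. A minimal, self-contained sketch of that pattern, with illustrative names (echo_async, demo_app) that are not part of this commit:

import asyncio

import gradio as gr

async def echo_async(text: str) -> str:
    # Stand-in for an awaitable backend call such as communicate.save(...).
    await asyncio.sleep(0.1)
    return text

with gr.Blocks() as demo_app:
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    btn = gr.Button("Run")
    # A coroutine function can be bound directly; Gradio awaits it on each click.
    btn.click(echo_async, inputs=box, outputs=out)

if __name__ == "__main__":
    demo_app.launch()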