dangthr committed
Commit 310182b · verified · 1 Parent(s): 316114e
Files changed (1)
  1. webui (1).py +183 -0
webui (1).py ADDED
@@ -0,0 +1,183 @@
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
+
+import argparse
+import gradio as gr
+import numpy as np
+import torch
+import torchaudio
+import random
+import librosa
+
+from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.utils.file_utils import load_wav, speed_change, logging
+
+def generate_seed():
+    seed = random.randint(1, 100000000)
+    return {
+        "__type__": "update",
+        "value": seed
+    }
+
+def set_all_random_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+
+max_val = 0.8
+def postprocess(speech, top_db=60, hop_length=220, win_length=440):
+    speech, _ = librosa.effects.trim(
+        speech, top_db=top_db,
+        frame_length=win_length,
+        hop_length=hop_length
+    )
+    if speech.abs().max() > max_val:
+        speech = speech / speech.abs().max() * max_val
+    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
+    return speech
+
+inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
+instruct_dict = {'预训练音色': '1. 选择预训练音色\n2. 点击生成音频按钮',
+                 '3s极速复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 输入prompt文本\n3. 点击生成音频按钮',
+                 '跨语种复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 点击生成音频按钮',
+                 '自然语言控制': '1. 选择预训练音色\n2. 输入instruct文本\n3. 点击生成音频按钮'}
+stream_mode_list = [('否', False), ('是', True)]
+def change_instruction(mode_checkbox_group):
+    return instruct_dict[mode_checkbox_group]
+
+def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed, stream, speed_factor):
+    if prompt_wav_upload is not None:
+        prompt_wav = prompt_wav_upload
+    elif prompt_wav_record is not None:
+        prompt_wav = prompt_wav_record
+    else:
+        prompt_wav = None
+    # if instruct mode, please make sure that model is iic/CosyVoice-300M-Instruct and not cross_lingual mode
+    if mode_checkbox_group in ['自然语言控制']:
+        if cosyvoice.frontend.instruct is False:
+            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(args.model_dir))
+            return (target_sr, default_data)
+        if instruct_text == '':
+            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
+            return (target_sr, default_data)
+        if prompt_wav is not None or prompt_text != '':
+            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
+    # if cross_lingual mode, please make sure that model is iic/CosyVoice-300M and tts_text prompt_text are different language
+    if mode_checkbox_group in ['跨语种复刻']:
+        if cosyvoice.frontend.instruct is True:
+            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(args.model_dir))
+            return (target_sr, default_data)
+        if instruct_text != '':
+            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
+        if prompt_wav is None:
+            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')
+            return (target_sr, default_data)
+        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言')
+    # if in zero_shot cross_lingual, please make sure that prompt_text and prompt_wav meets requirements
+    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:
+        if prompt_wav is None:
+            gr.Warning('prompt音频为空,您是否忘记输入prompt音频?')
+            return (target_sr, default_data)
+        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:
+            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))
+            return (target_sr, default_data)
+    # sft mode only use sft_dropdown
+    if mode_checkbox_group in ['预训练音色']:
+        if instruct_text != '' or prompt_wav is not None or prompt_text != '':
+            gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!')
+    # zero_shot mode only use prompt_wav prompt text
+    if mode_checkbox_group in ['3s极速复刻']:
+        if prompt_text == '':
+            gr.Warning('prompt文本为空,您是否忘记输入prompt文本?')
+            return (target_sr, default_data)
+        if instruct_text != '':
+            gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!')
+
+    if mode_checkbox_group == '预训练音色':
+        logging.info('get sft inference request')
+        set_all_random_seed(seed)
+        for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream):
+            yield (target_sr, i['tts_speech'].numpy().flatten())
+    elif mode_checkbox_group == '3s极速复刻':
+        logging.info('get zero_shot inference request')
+        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
+        set_all_random_seed(seed)
+        for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream):
+            yield (target_sr, i['tts_speech'].numpy().flatten())
+    elif mode_checkbox_group == '跨语种复刻':
+        logging.info('get cross_lingual inference request')
+        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
+        set_all_random_seed(seed)
+        for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream):
+            yield (target_sr, i['tts_speech'].numpy().flatten())
+    else:
+        logging.info('get instruct inference request')
+        set_all_random_seed(seed)
+        for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream):
+            yield (target_sr, i['tts_speech'].numpy().flatten())
+
+def main():
+    with gr.Blocks() as demo:
+        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)")
+        gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")
+
+        tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
+        speed_factor = gr.Slider(minimum=0.25, maximum=4, step=0.05, label="语速调节", value=1.0, interactive=True)
+        with gr.Row():
+            mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
+            instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5)
+            sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25)
+            stream = gr.Radio(choices=stream_mode_list, label='是否流式推理', value=stream_mode_list[0][1])
+            with gr.Column(scale=0.25):
+                seed_button = gr.Button(value="\U0001F3B2")
+                seed = gr.Number(value=0, label="随机推理种子")
+
+        with gr.Row():
+            prompt_wav_upload = gr.Audio(sources='upload', type='filepath', label='选择prompt音频文件,注意采样率不低于16khz')
+            prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件')
+        prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本,需与prompt音频内容一致,暂时不支持自动识别...", value='')
+        instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='')
+
+        generate_button = gr.Button("生成音频")
+
+        audio_output = gr.Audio(label="合成音频", autoplay=True, streaming=True)
+
+        seed_button.click(generate_seed, inputs=[], outputs=seed)
+        generate_button.click(generate_audio,
+                              inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed, stream, speed_factor],
+                              outputs=[audio_output])
+        mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
+    demo.queue(max_size=4, default_concurrency_limit=2)
+    demo.launch(server_name='0.0.0.0', server_port=args.port, share=True)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--port',
+                        type=int,
+                        default=8000)
+    parser.add_argument('--model_dir',
+                        type=str,
+                        default='pretrained_models/CosyVoice-300M',
+                        help='local path or modelscope repo id')
+    args = parser.parse_args()
+    cosyvoice = CosyVoice(args.model_dir)
+    sft_spk = cosyvoice.list_avaliable_spks()
+    prompt_sr, target_sr = 16000, 22050
+    default_data = np.zeros(target_sr)
+    main()
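
For reference, the web UI added above is a thin wrapper around the CosyVoice inference API. A minimal sketch of driving the same pretrained-voice (SFT) path directly, without Gradio, is shown below; the model directory and output filenames are assumptions (the script's own default model path is reused), not part of this commit.

    import sys
    sys.path.append('third_party/Matcha-TTS')  # same third-party path the webui adds

    import torchaudio
    from cosyvoice.cli.cosyvoice import CosyVoice

    # Assumed local model directory; matches the webui's --model_dir default.
    cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')
    spk = cosyvoice.list_avaliable_spks()[0]  # first pretrained voice (method name spelled as in the API)

    # inference_sft yields chunks of {'tts_speech': tensor}; save each at the 22050 Hz target rate used above.
    tts_text = '我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。'
    for idx, out in enumerate(cosyvoice.inference_sft(tts_text, spk, stream=False)):
        torchaudio.save('sft_output_{}.wav'.format(idx), out['tts_speech'], 22050)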