Mahiruoshi committed
Commit 92a59ca · Parent: 7d82bc1

Update app.py

Files changed (1):
  1. app.py (+44, -19)
app.py CHANGED
@@ -27,11 +27,6 @@ class VitsGradio:
             for dir in dirs:
                 self.modelPaths.append(dir)
         with gr.Blocks() as self.Vits:
-            gr.Markdown(
-                "## <center> Vits chatbot后端演示,直接在本机上克隆并部署该space即可。搭配前端使用: https://github.com/Arkueid/Live2DMascot\n"
-                "## <center> 使用方法:在控制面板选择tts模型与说话人,并且设置gpt的api key,之后会生成语音合成的api。"
-                "### <center> 我移除了pyopenjtalk,如果合成不了日语就直接摆烂,所以不存在安装困难的情况.运行main.py后直接打开ui界面配置:http://127.0.0.1:7860/\n"
-                "### <center> 如果有能力,可自行替换text文件夹\n")
             with gr.Tab("调试用"):
                 with gr.Row():
                     with gr.Column():
@@ -54,11 +49,11 @@ class VitsGradio:
                 with gr.Column():
                     with gr.Row():
                         with gr.Column():
-                            self.api_input1 = gr.TextArea(label="输入api-key或本地存储说话模型的路径", value="https://platform.openai.com/account/api-keys")
-                            with gr.Accordion(label="chatbot选择", open=False):
-                                self.api_input2 = gr.Checkbox(value=True, label="采用gpt3.5")
-                                self.local_chat1 = gr.Checkbox(value=False, label="启动本地chatbot,这里用不了,别点")
-                                self.local_chat2 = gr.Checkbox(value=True, label="是否量化")
+                            self.api_input1 = gr.TextArea(label="输入gpt/茉莉云的api-key或本地存储说话模型的路径.如果要用茉莉云则用'|'隔开key和密码", value="49eig5nu3rllvg6e|itcn9760")
+                            with gr.Accordion(label="chatbot选择(默认gpt3.5)", open=False):
+                                self.api_input2 = gr.Checkbox(value=False, label="茉莉云")
+                                self.local_chat1 = gr.Checkbox(value=False, label="启动本地chatbot")
+                                self.local_chat2 = gr.Checkbox(value=False, label="是否量化")
                             res = gr.TextArea()
                             Botselection = gr.Button("完成chatbot设定")
                             Botselection.click(self.check_bot, inputs=[self.api_input1,self.api_input2,self.local_chat1,self.local_chat2], outputs = [res])
@@ -66,9 +61,9 @@ class VitsGradio:
                         self.input2 = gr.Dropdown(label="Language", choices=self.lan, value="自动", interactive=True)
                     with gr.Column():
                         btnVC = gr.Button("完成vits TTS端设定")
-                        self.input3 = gr.Dropdown(label="Speaker", choices=list(range(114514)), value=21, interactive=True)
+                        self.input3 = gr.Dropdown(label="Speaker", choices=list(range(1001)), value=0, interactive=True)
                         self.input4 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声比例(noise scale),以控制情感", value=0.6)
-                        self.input5 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声偏差(noise scale w),以控制音素长短", value=0.668)
+                        self.input5 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声偏差(noise scale w),以控制音素长短", value=0.667)
                         self.input6 = gr.Slider(minimum=0.1, maximum=10, label="duration", value=1)
                         statusa = gr.TextArea()
                         btnVC.click(self.create_tts_fn, inputs=[self.input1, self.input2, self.input3, self.input4, self.input5, self.input6], outputs = [statusa])
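The three sliders above are the usual VITS sampling knobs: noise scale controls prosody/emotion variation, noise scale w controls the duration-predictor noise (phoneme length), and duration acts as an overall length/speed factor. Below is a minimal sketch of how such values are typically handed to a VITS model's inference call; the net_g.infer signature follows common VITS implementations and is an assumption, not code taken from this repository.

import torch

# Sketch only: net_g, stn_tst and the speaker id follow the conventional
# VITS inference pattern; the defaults mirror the slider values in this diff.
def synthesize(net_g, stn_tst, speaker_id,
               noise_scale=0.6, noise_scale_w=0.667, length_scale=1.0):
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0)                          # add batch dim
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])   # phoneme count
        sid = torch.LongTensor([speaker_id])                  # "Speaker" dropdown
        audio = net_g.infer(x_tst, x_tst_lengths, sid=sid,
                            noise_scale=noise_scale,          # "noise scale" slider
                            noise_scale_w=noise_scale_w,      # "noise scale w" slider
                            length_scale=length_scale)[0][0, 0].cpu().numpy()
    return audio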
@@ -83,6 +78,34 @@ class VitsGradio:
         return web,file_path


+    def mori(self,text):
+        import http.client
+        conn = http.client.HTTPSConnection("api.mlyai.com")
+        payload = json.dumps({
+            "content": text,
+            "type": 1,
+            "from": "123456",
+            "fromName": "侑"
+        })
+        headers = {
+            'Api-Key': self.api_key,
+            'Api-Secret': self.api_secret,
+            'Content-Type': 'application/json'
+        }
+        conn.request("POST", "/reply", payload, headers)
+        res = conn.getresponse()
+        data = res.read()
+        decoded_data = json.loads(data.decode("utf-8"))
+
+        if decoded_data["code"] == "00000":
+            answer = decoded_data["data"][0]["content"]
+            if text == 'exit':
+                conn.close()
+            return answer
+        else:
+            conn.close()
+            return '对不起,做不到'
+
     def chatgpt(self,text):
         self.messages.append({"role": "user", "content": text},)
         chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages= self.messages)
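The new mori() method posts the user text to the 茉莉云 (Mollyai) chatbot endpoint at api.mlyai.com/reply and reads the first reply out of the JSON response. A self-contained sketch of the same call, handy for checking an Api-Key/Api-Secret pair outside the Gradio app; the request fields and the "00000" success code come from the diff above, while the helper itself and its try/finally cleanup are illustrative additions.

import http.client
import json

def mollyai_reply(api_key, api_secret, text):
    """Illustrative stand-alone version of the /reply call made by mori()."""
    conn = http.client.HTTPSConnection("api.mlyai.com")
    payload = json.dumps({"content": text, "type": 1,
                          "from": "123456", "fromName": "侑"})
    headers = {"Api-Key": api_key, "Api-Secret": api_secret,
               "Content-Type": "application/json"}
    try:
        conn.request("POST", "/reply", payload, headers)
        body = json.loads(conn.getresponse().read().decode("utf-8"))
    finally:
        conn.close()  # unlike mori(), always release the connection
    if body.get("code") == "00000":
        return body["data"][0]["content"]
    return "对不起,做不到"  # same fallback string as the diff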
@@ -121,6 +144,7 @@ class VitsGradio:
         return response['choices'][0]['text'].strip()

     def check_bot(self,api_input1,api_input2,local_chat1,local_chat2):
+        self.api_key, self.api_secret = api_input1.split("|")
         if local_chat1:
             from transformers import AutoTokenizer, AutoModel
             self.tokenizer = AutoTokenizer.from_pretrained(api_input1, trust_remote_code=True)
@@ -130,8 +154,11 @@ class VitsGradio:
             self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True)
             self.history = []
         else:
-            self.messages = []
-            openai.api_key = api_input1
+            try:
+                self.messages = []
+                openai.api_key = api_input1
+            except:
+                pass
         return "Finished"

     def is_japanese(self,string):
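Note that check_bot() now calls api_input1.split("|") unconditionally, so the unpacking raises ValueError whenever the field holds a plain OpenAI key or a local model path without a "|". A small, hypothetical variant that tolerates both forms, purely as an illustration and not part of this commit:

# Hypothetical helper: same split as check_bot(), but tolerant of values
# without a "|" separator (plain OpenAI keys or local model paths).
def split_key_secret(api_input1):
    if "|" in api_input1:
        api_key, api_secret = api_input1.split("|", 1)
    else:
        api_key, api_secret = api_input1, ""
    return api_key, api_secret

# Example with the default value from the UI hunk above:
# split_key_secret("49eig5nu3rllvg6e|itcn9760") -> ("49eig5nu3rllvg6e", "itcn9760")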
@@ -202,11 +229,11 @@ class VitsGradio:

     def tts_fn(self,text):
         if self.local_chat1:
-            text = self.chatgpt(text)
+            text = self.mori(text)
         elif self.api_input2:
             text = self.ChATGLM(text)
         else:
-            text = self.gpt3_chat(text)
+            text = text = self.chatgpt(text)
         print(text)
         text =self.sle(self.language,text)
         with torch.no_grad():
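After this change tts_fn dispatches on the two checkboxes defined earlier: local_chat1 routes to the new 茉莉云 mori() call, api_input2 to ChATGLM, and everything else falls back to chatgpt(). A condensed, hypothetical restatement of that routing for readability; it is not code added by the commit.

# Hypothetical summary of the dispatch inside tts_fn after this commit.
def pick_reply(self, text):
    if self.local_chat1:            # "启动本地chatbot" checkbox
        return self.mori(text)      # 茉莉云 backend
    if self.api_input2:             # "茉莉云" checkbox
        return self.ChATGLM(text)   # ChatGLM backend
    return self.chatgpt(text)       # default: gpt-3.5-turbo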
@@ -235,7 +262,7 @@ def text_api():
               'Content-Type': 'audio/wav',
               'Text': text.encode('utf-8')}
    return wav_bytes, 200, headers
-'''
+
 def gradio_interface():
     return grVits.Vits.launch()

@@ -244,5 +271,3 @@ if __name__ == '__main__':
     gradio_thread = Thread(target=gradio_interface)
     api_thread.start()
     gradio_thread.start()
-'''
-grVits.Vits.launch()
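Deleting the surrounding ''' markers re-activates the dual-thread startup: one thread serves the wav/Text HTTP endpoint built in text_api(), the other runs the Gradio UI via gradio_interface(). A minimal sketch of that pattern with stand-in Flask and Gradio pieces; only the two-thread structure mirrors app.py, while the route, port and placeholder UI below are assumptions.

from threading import Thread

import gradio as gr
from flask import Flask

app = Flask(__name__)

@app.route("/placeholder")
def placeholder_api():
    # Stand-in for text_api(): returns audio bytes plus custom headers.
    return b"", 200, {"Content-Type": "audio/wav"}

def run_api():
    app.run(host="0.0.0.0", port=8080)  # assumed port

def gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("placeholder UI")
    return demo.launch()

if __name__ == "__main__":
    api_thread = Thread(target=run_api)
    gradio_thread = Thread(target=gradio_interface)
    api_thread.start()
    gradio_thread.start()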
 