KleinPenny committed
Commit c949392 · verified · 1 Parent(s): 0777597

Update app.py

Files changed (1)
  1. app.py +36 -30
app.py CHANGED
@@ -2,17 +2,14 @@ import gradio as gr
 import numpy as np
 from huggingface_hub import InferenceClient
 import os
-print(os.getenv('hf_token'))

-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
+# Get the HuggingFace API token
 client = InferenceClient(
     "microsoft/Phi-3-mini-4k-instruct",
     token=os.getenv('hf_token'),
 )

-
+# Define the respond function, used to generate chat replies
 def respond(
     message,
     history: list[tuple[str, str]],
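The body of respond is collapsed in this hunk; only its tail (response += token / yield response) appears below. A minimal sketch of the streaming loop such a tail implies, assuming the client defined above and the huggingface_hub chat_completion streaming API; the message-assembly details are an assumption, not the committed code:

def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Rebuild the conversation in chat-completion message format
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the completion and yield the accumulated reply after each token
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response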
@@ -45,24 +42,6 @@ def respond(
         response += token
         yield response

-def reverse_audio(audio):
-    sr, data = audio
-    return (sr, np.flipud(data))
-
-input_audio = gr.Audio(
-    sources=["microphone"],
-    waveform_options=gr.WaveformOptions(
-        waveform_color="#01C6FF",
-        waveform_progress_color="#0066B4",
-        skip_length=2,
-        show_controls=False,
-    ),
-)
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-
 # Define the audio processing function
 def process_audio(audio):
     if audio is None:
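The middle of process_audio is also collapsed. For orientation, gr.Audio with the default type="numpy" hands the callback either None or a (sample_rate, data) tuple; the sketch below only illustrates that input format and is not the committed implementation (which ends in return 'True'):

def process_audio(audio):
    # audio is None when nothing was recorded
    if audio is None:
        return 'False'
    # Otherwise gr.Audio delivers a (sample rate in Hz, numpy array of samples) pair
    sr, data = audio
    duration = data.shape[0] / sr
    return f"Received {duration:.1f}s of audio at {sr} Hz"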
@@ -74,22 +53,38 @@ def process_audio(audio):

     return 'True'

+# Define the chat logic and update the chat history
+def chat_with_model(user_message, history):
+    system_message = "You are chatting with a helpful assistant."
+    max_tokens = 200
+    temperature = 0.7
+    top_p = 0.9
+
+    # Call the respond function to generate a reply
+    bot_response = next(respond(user_message, history, system_message, max_tokens, temperature, top_p))
+
+    # Append the user message and the model reply to the chat history
+    history.append((user_message, bot_response))
+    return history, history
+
 # Create the Gradio interface
 def create_interface():
     with gr.Blocks() as demo:
         # Title
         gr.Markdown("# 语音识别与生成系统")
-
+
         # Input section: audio upload
         with gr.Row():
-            audio_input = gr.Audio(
+            audio_input = gr.Audio(
                 sources=["microphone"],
                 waveform_options=gr.WaveformOptions(
                     waveform_color="#01C6FF",
                     waveform_progress_color="#0066B4",
                     skip_length=2,
                     show_controls=False,
-            ),label="音频生成")
+                ),
+                label="音频生成"
+            )

         # Output section: recognized text and audio playback
         with gr.Row():
@@ -98,14 +93,25 @@ def create_interface():

         # Processing button
         process_button = gr.Button("处理音频")
-
+
         # Bind the processing logic
         process_button.click(process_audio, inputs=[audio_input], outputs=[recognized_text])
-
-        return demo

+        # Add the chat box
+        with gr.Row():
+            chatbot = gr.Chatbot(label="Chatbot")  # Area that displays the chat history
+            user_input = gr.Textbox(placeholder="Type a message...", label="输入消息")  # User input box
+            send_button = gr.Button("发送")  # Button for sending a message
+
+        # Chat history
+        chat_history = gr.State([])  # Holds the chat history
+
+        # When the send button is clicked, call chat_with_model and update the chat log
+        send_button.click(chat_with_model, inputs=[user_input, chat_history], outputs=[chatbot, chat_history])
+
+        return demo


 if __name__ == "__main__":
     demo = create_interface()
-    demo.launch()
+    demo.launch()
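As committed, chat_with_model reads only the first value from the respond generator via next(), i.e. the reply after the first streamed token. A variant that drains the whole stream before appending to the history, keeping the same (chatbot, state) outputs, is sketched below as an illustration; it is not part of the commit:

def chat_with_model(user_message, history):
    system_message = "You are chatting with a helpful assistant."
    max_tokens = 200
    temperature = 0.7
    top_p = 0.9

    # respond() yields the accumulated reply after every streamed token,
    # so the last yielded value is the complete answer.
    bot_response = ""
    for partial in respond(user_message, history, system_message, max_tokens, temperature, top_p):
        bot_response = partial

    history.append((user_message, bot_response))
    return history, history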
 