Update app.py
app.py
CHANGED
@@ -71,7 +71,7 @@ async def chat_with_ai(message):
 
     try:
         response = chat_client.chat_completion(
-            messages=[{"role": "system", "content": "You are a helpful voice assistant. Provide concise and clear responses to user queries.
+            messages=[{"role": "system", "content": "You are a helpful voice assistant. Provide concise and clear responses to user queries."}] + chat_history,
             max_tokens=800,
             temperature=0.7
         )
@@ -98,62 +98,25 @@ def transcribe_and_chat(audio):
     return response, audio_path
 
 def create_demo():
-    with gr.Blocks(
-        gr.Markdown(
-            """
-            # 🎙️ AI Voice Assistant
-            Welcome to the AI Voice Assistant! Speak your question or command, and I'll respond with both text and voice.
-            """
-        )
+    with gr.Blocks() as demo:
+        gr.Markdown("# AI Voice Assistant")
 
         with gr.Row():
             with gr.Column(scale=1):
-                audio_input = gr.Audio(
-                    type="filepath",
-                    label="🎤 Press 'Record' to Speak",
-                    source="microphone"
-                )
+                audio_input = gr.Audio(type="filepath", label="Press 'Record' to Speak")
 
-            with gr.Column(scale=2):
-                chat_output = gr.Chatbot(
-                    label="Conversation",
-                    height=300
-                )
-                audio_output = gr.Audio(
-                    label="🔊 AI Voice Response",
-                    autoplay=True
-                )
+            with gr.Column(scale=1):
+                chat_output = gr.Textbox(label="AI Response")
+                audio_output = gr.Audio(label="AI Voice Response", autoplay=True)
 
-
-
 
-        def process_audio(audio
+        def process_audio(audio):
             logging.info(f"Received audio: {audio}")
             if audio is None:
-                return
-
-
-
-            logging.info(f"Response: {assistant_response}, Audio path: {audio_path}")
-            return history, audio_path, None  # Return None to clear the audio input
-
-        def clear_conversation():
-            global chat_history
-            chat_history = []
-            return None, None, None
-
-        audio_input.change(
-            process_audio,
-            inputs=[audio_input, chat_output],
-            outputs=[chat_output, audio_output, audio_input]
-        )
+                return "No audio detected. Please try recording again.", None, None
+            response, audio_path = transcribe_and_chat(audio)
+            logging.info(f"Response: {response}, Audio path: {audio_path}")
+            return response, audio_path, None  # Return None to clear the audio input
 
-        clear_button.click(
-            clear_conversation,
-            outputs=[chat_output, audio_output, audio_input]
-        )
-
-        # Custom JavaScript for auto-submission and audio playback
         demo.load(None, js="""
         function() {
            document.querySelector("audio").addEventListener("stop", function() {
@@ -182,6 +145,8 @@ def create_demo():
         }
         """)
 
+        audio_input.change(process_audio, inputs=[audio_input], outputs=[chat_output, audio_output, audio_input])
+
     return demo
 
 # Launch the Gradio app
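For context, the updated `chat_completion` call prepends a system prompt to a module-level `chat_history` list, and the "Launch the Gradio app" block itself falls outside the hunks shown here. Below is a minimal sketch of both pieces, assuming `chat_history` holds plain role/content dicts (the format `InferenceClient.chat_completion` expects) and that the app uses Gradio's standard launch pattern; neither detail is visible in this diff.

```python
# Assumed shape of the conversation buffer that chat_client.chat_completion consumes:
# a flat list of {"role": ..., "content": ...} dicts, oldest message first.
chat_history = [
    {"role": "user", "content": "What's the weather like today?"},
    {"role": "assistant", "content": "I don't have live weather data, but I can help you find a forecast."},
]

# Launch the Gradio app (sketch; the actual launch arguments are not shown in this diff).
if __name__ == "__main__":
    demo = create_demo()  # build the Blocks UI defined in the hunks above
    demo.launch()         # start the local Gradio server
```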