siyah1 committed
Commit 2ae12af · verified · 1 Parent(s): 61e99bd

Update app.py

Files changed (1)
  1. app.py +221 -180
app.py CHANGED
Old version (deletions marked "-"):

@@ -1,75 +1,45 @@
  import gradio as gr
  from geminisearch import webSearch
- import speech_recognition as sr
- import pyttsx3
  import threading
- import io
- import wave
- import numpy as np

  class QuasarAudioChat:
      def __init__(self):
-         self.tts_engine = pyttsx3.init()
-         self.tts_engine.setProperty('rate', 150)
-         self.tts_engine.setProperty('volume', 0.9)
-         self.recognizer = sr.Recognizer()
-         self.is_listening = False

-     def speech_to_text(self, audio_data):
-         """Convert speech to text"""
-         try:
-             if audio_data is None:
-                 return "No audio received"
-
-             # Convert audio to the format expected by speech_recognition
-             with sr.AudioFile(io.BytesIO(audio_data)) as source:
-                 audio = self.recognizer.record(source)
-             text = self.recognizer.recognize_google(audio)
-             return text
-         except sr.UnknownValueError:
-             return "Could not understand audio"
-         except sr.RequestError as e:
-             return f"Speech recognition error: {e}"
-
-     def text_to_speech(self, text):
-         """Convert text to speech"""
-         try:
-             # Remove markdown and special characters for cleaner speech
-             clean_text = text.replace('*', '').replace('#', '').replace('`', '')
-             self.tts_engine.say(clean_text)
-             self.tts_engine.runAndWait()
-         except Exception as e:
-             print(f"TTS Error: {e}")
-
      def process_audio_message(self, audio, chat_history):
-         """Process audio input and return response"""
          if audio is None:
-             return chat_history, "Please provide audio input"

-         # Convert audio to text
-         user_text = self.speech_to_text(audio)
-
-         if "error" in user_text.lower() or "could not" in user_text.lower():
-             chat_history.append({"role": "user", "content": "[Audio Error]"})
-             chat_history.append({"role": "assistant", "content": user_text})
-             return chat_history, user_text

          # Add user message to chat
-         chat_history.append({"role": "user", "content": f"🎤 {user_text}"})

-         # Get response from webSearch
          try:
-             response = webSearch(user_text)
              chat_history.append({"role": "assistant", "content": response})

-             # Convert response to speech in background thread
-             threading.Thread(target=self.text_to_speech, args=(response,), daemon=True).start()
-
-             return chat_history, response
          except Exception as e:
              error_msg = f"Search error: {str(e)}"
              chat_history.append({"role": "assistant", "content": error_msg})
-             return chat_history, error_msg

  # Initialize audio chat
  audio_chat = QuasarAudioChat()
@@ -77,115 +47,179 @@ audio_chat = QuasarAudioChat()
  # Predefined questions for Quasar LLM
  quasar_examples = [
      "What are the latest AI technology developments?",
-     "What's happening in global news today?",
      "What's the current cryptocurrency market status?"
  ]

- # Enhanced CSS with audio-focused styling
  custom_css = """
  .gradio-container {
-     max-width: 1200px !important;
      margin: auto !important;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
      min-height: 100vh !important;
  }

  .main-container {
      background: rgba(255, 255, 255, 0.95) !important;
-     border-radius: 20px !important;
      padding: 2rem !important;
-     margin: 1rem !important;
-     box-shadow: 0 20px 40px rgba(0,0,0,0.1) !important;
  }

  .chatbot {
-     border-radius: 15px !important;
-     box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1) !important;
-     border: 2px solid #e1e5e9 !important;
  }

  .audio-input {
-     border-radius: 50px !important;
-     background: linear-gradient(135deg, #ff6b6b, #ffd93d) !important;
      border: none !important;
      color: white !important;
-     font-weight: bold !important;
-     box-shadow: 0 4px 15px rgba(255, 107, 107, 0.3) !important;
      transition: all 0.3s ease !important;
  }

- .audio-input:hover {
-     transform: translateY(-2px) !important;
-     box-shadow: 0 8px 25px rgba(255, 107, 107, 0.4) !important;
  }

  .text-input {
      border-radius: 25px !important;
-     border: 2px solid #e1e5e9 !important;
      background: rgba(255, 255, 255, 0.9) !important;
  }

- .text-input:focus-within {
-     border-color: #667eea !important;
      box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
  }

  h1 {
      text-align: center !important;
-     background: linear-gradient(135deg, #667eea, #764ba2) !important;
      -webkit-background-clip: text !important;
      -webkit-text-fill-color: transparent !important;
-     font-weight: 700 !important;
-     font-size: 2.5rem !important;
-     margin-bottom: 1rem !important;
  }

- .description {
      text-align: center !important;
-     color: #666 !important;
      margin-bottom: 2rem !important;
  }

  .examples {
-     margin-top: 1rem !important;
  }

  .example {
-     border-radius: 20px !important;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
      color: white !important;
      border: none !important;
      transition: all 0.3s ease !important;
      font-weight: 500 !important;
  }

  .example:hover {
-     transform: translateY(-2px) !important;
-     box-shadow: 0 8px 25px rgba(102, 126, 234, 0.3) !important;
- }
-
- .audio-controls {
-     display: flex !important;
-     justify-content: center !important;
-     gap: 1rem !important;
-     margin: 1rem 0 !important;
  }

- .status-indicator {
-     padding: 0.5rem 1rem !important;
      border-radius: 20px !important;
-     background: #e8f4f8 !important;
-     color: #1976d2 !important;
-     font-weight: 500 !important;
-     text-align: center !important;
  }
  """

- # Create the Quasar LLM interface with audio capabilities
  with gr.Blocks(
      theme=gr.themes.Soft(
          primary_hue="blue",
-         secondary_hue="slate",
          neutral_hue="slate",
          font=gr.themes.GoogleFont("Inter")
      ),
@@ -193,137 +227,144 @@ with gr.Blocks(
      title="Quasar LLM Audio Chat"
  ) as app:

      gr.HTML("""
      <div style="text-align: center; margin-bottom: 2rem;">
-         <h1>✨ Quasar LLM Audio Chat</h1>
-         <p class="description">
-             🎤 Real-time voice-powered web search with intelligent responses<br>
-             Speak your questions or type them - get instant answers with audio feedback!
          </p>
      </div>
      """)

-     with gr.Row():
-         with gr.Column(scale=1):
-             # Chat interface
-             chatbot = gr.Chatbot(
-                 value=[],
-                 height=400,
-                 show_label=False,
-                 container=True,
-                 bubble_full_width=False,
-                 render_markdown=True,
-                 type="messages"
-             )
-
-             # Audio input section
-             with gr.Row():
-                 with gr.Column(scale=3):
-                     audio_input = gr.Audio(
-                         sources=["microphone"],
-                         type="numpy",
-                         label="🎤 Voice Input",
-                         show_label=True,
-                         container=True,
-                         elem_classes=["audio-input"]
-                     )
-
-                 with gr.Column(scale=1):
-                     audio_submit = gr.Button(
-                         "🎤 Send Voice",
-                         variant="primary",
-                         size="lg",
-                         elem_classes=["audio-input"]
-                     )

-             # Text input as backup
-             with gr.Row():
                  text_input = gr.Textbox(
-                     placeholder="💬 Or type your message here...",
                      container=False,
-                     scale=7,
                      show_label=False,
                      lines=1,
                      max_lines=3,
                      elem_classes=["text-input"]
                  )
-                 text_submit = gr.Button("Send", variant="secondary")

-             # Status indicator
-             status = gr.Textbox(
-                 value="Ready for voice or text input",
-                 label="Status",
-                 interactive=False,
-                 elem_classes=["status-indicator"]
-             )

-     # Example questions
      gr.Examples(
          examples=quasar_examples,
          inputs=[text_input],
-         label="✨ Try these questions:",
          elem_classes=["examples"]
      )

      # Audio processing function
-     def process_audio(audio, history):
-         if audio is None:
-             return history, "Please record some audio first"

-         try:
-             # Convert numpy array to audio format for speech recognition
-             sample_rate, audio_data = audio
-
-             # Convert to WAV format
-             wav_buffer = io.BytesIO()
-             with wave.open(wav_buffer, 'wb') as wav_file:
-                 wav_file.setnchannels(1)
-                 wav_file.setsampwidth(2)
-                 wav_file.setframerate(sample_rate)
-                 wav_file.writeframes((audio_data * 32767).astype(np.int16).tobytes())
-
-             wav_buffer.seek(0)
-             return audio_chat.process_audio_message(wav_buffer.read(), history)
-         except Exception as e:
-             error_msg = f"Audio processing error: {str(e)}"
-             history.append({"role": "assistant", "content": error_msg})
-             return history, error_msg

      # Text processing function
-     def process_text(text, history):
          if not text.strip():
-             return history, "", "Please enter a message"

          history.append({"role": "user", "content": text})

          try:
              response = webSearch(text)
              history.append({"role": "assistant", "content": response})

-             # Convert response to speech
-             threading.Thread(target=audio_chat.text_to_speech, args=(response,), daemon=True).start()

-             return history, "", f"Processed: {text[:50]}..."
          except Exception as e:
-             error_msg = f"Search error: {str(e)}"
              history.append({"role": "assistant", "content": error_msg})
-             return history, "", error_msg

      # Event handlers
      audio_submit.click(
-         process_audio,
          inputs=[audio_input, chatbot],
          outputs=[chatbot, status]
      )

      text_submit.click(
-         process_text,
          inputs=[text_input, chatbot],
          outputs=[chatbot, text_input, status]
      )

      text_input.submit(
-         process_text,
          inputs=[text_input, chatbot],
          outputs=[chatbot, text_input, status]
      )
New version (additions marked "+"):

@@ -1,75 +1,45 @@
  import gradio as gr
  from geminisearch import webSearch
  import threading
+ import time

  class QuasarAudioChat:
      def __init__(self):
+         self.is_processing = False

      def process_audio_message(self, audio, chat_history):
+         """Process audio input using Gradio's built-in speech recognition"""
          if audio is None:
+             return chat_history, "Please provide audio input", "No audio received"

+         try:
+             # Gradio will handle the speech-to-text conversion automatically
+             # when we use the audio input with the speech recognition feature
+             return chat_history, "Processing audio...", "Audio received, processing..."
+         except Exception as e:
+             error_msg = f"Audio processing error: {str(e)}"
+             chat_history.append({"role": "assistant", "content": error_msg})
+             return chat_history, error_msg, "Error processing audio"
+
+     def process_text_with_audio_response(self, text, chat_history):
+         """Process text input and prepare for audio output"""
+         if not text.strip():
+             return chat_history, "", "Please enter a message"

          # Add user message to chat
+         chat_history.append({"role": "user", "content": text})

          try:
+             # Get response from webSearch
+             response = webSearch(text)
              chat_history.append({"role": "assistant", "content": response})

+             return chat_history, "", f"✅ Response ready: {text[:30]}..."
+
          except Exception as e:
              error_msg = f"Search error: {str(e)}"
              chat_history.append({"role": "assistant", "content": error_msg})
+             return chat_history, "", f"❌ Error: {str(e)[:30]}..."

  # Initialize audio chat
  audio_chat = QuasarAudioChat()

@@ -77,115 +47,179 @@ audio_chat = QuasarAudioChat()
  # Predefined questions for Quasar LLM
  quasar_examples = [
      "What are the latest AI technology developments?",
+     "What's happening in global news today?",
      "What's the current cryptocurrency market status?"
  ]

+ # Enhanced CSS with modern audio chat styling
  custom_css = """
+ :root {
+     --quasar-primary: #667eea;
+     --quasar-secondary: #764ba2;
+     --quasar-accent: #ff6b6b;
+     --quasar-gold: #ffd93d;
+ }
+
  .gradio-container {
+     max-width: 1400px !important;
      margin: auto !important;
+     background: linear-gradient(135deg, var(--quasar-primary) 0%, var(--quasar-secondary) 100%) !important;
      min-height: 100vh !important;
+     padding: 1rem !important;
  }

  .main-container {
      background: rgba(255, 255, 255, 0.95) !important;
+     border-radius: 25px !important;
      padding: 2rem !important;
+     box-shadow: 0 25px 50px rgba(0,0,0,0.15) !important;
+     backdrop-filter: blur(10px) !important;
  }

  .chatbot {
+     border-radius: 20px !important;
+     box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1) !important;
+     border: 2px solid rgba(102, 126, 234, 0.2) !important;
+     background: rgba(255, 255, 255, 0.9) !important;
+ }
+
+ .audio-section {
+     background: linear-gradient(135deg, rgba(255, 107, 107, 0.1), rgba(255, 217, 61, 0.1)) !important;
+     border-radius: 20px !important;
+     padding: 1.5rem !important;
+     margin: 1rem 0 !important;
+     border: 2px solid rgba(255, 107, 107, 0.2) !important;
  }

  .audio-input {
+     border-radius: 15px !important;
+     background: rgba(255, 255, 255, 0.9) !important;
+     border: 2px solid var(--quasar-accent) !important;
+     box-shadow: 0 5px 15px rgba(255, 107, 107, 0.2) !important;
+ }
+
+ .voice-button {
+     background: linear-gradient(135deg, var(--quasar-accent), var(--quasar-gold)) !important;
      border: none !important;
+     border-radius: 50px !important;
      color: white !important;
+     font-weight: 600 !important;
+     font-size: 1.1rem !important;
+     padding: 0.8rem 2rem !important;
+     box-shadow: 0 8px 20px rgba(255, 107, 107, 0.3) !important;
      transition: all 0.3s ease !important;
+     text-transform: uppercase !important;
+     letter-spacing: 1px !important;
  }

+ .voice-button:hover {
+     transform: translateY(-3px) !important;
+     box-shadow: 0 12px 30px rgba(255, 107, 107, 0.4) !important;
+ }
+
+ .text-section {
+     background: linear-gradient(135deg, rgba(102, 126, 234, 0.1), rgba(118, 75, 162, 0.1)) !important;
+     border-radius: 20px !important;
+     padding: 1.5rem !important;
+     margin: 1rem 0 !important;
+     border: 2px solid rgba(102, 126, 234, 0.2) !important;
  }

  .text-input {
      border-radius: 25px !important;
+     border: 2px solid var(--quasar-primary) !important;
      background: rgba(255, 255, 255, 0.9) !important;
+     font-size: 1rem !important;
+     padding: 0.8rem 1.5rem !important;
  }

+ .text-input:focus {
+     border-color: var(--quasar-secondary) !important;
      box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
  }

+ .send-button {
+     background: linear-gradient(135deg, var(--quasar-primary), var(--quasar-secondary)) !important;
+     border: none !important;
+     border-radius: 25px !important;
+     color: white !important;
+     font-weight: 600 !important;
+     padding: 0.8rem 1.5rem !important;
+     transition: all 0.3s ease !important;
+ }
+
+ .send-button:hover {
+     transform: translateY(-2px) !important;
+     box-shadow: 0 8px 20px rgba(102, 126, 234, 0.3) !important;
+ }
+
  h1 {
      text-align: center !important;
+     background: linear-gradient(135deg, var(--quasar-primary), var(--quasar-secondary)) !important;
      -webkit-background-clip: text !important;
      -webkit-text-fill-color: transparent !important;
+     font-weight: 800 !important;
+     font-size: 3rem !important;
+     margin-bottom: 0.5rem !important;
+     text-shadow: 2px 2px 4px rgba(0,0,0,0.1) !important;
  }

+ .subtitle {
      text-align: center !important;
+     color: #555 !important;
+     font-size: 1.2rem !important;
      margin-bottom: 2rem !important;
+     font-weight: 400 !important;
+ }
+
+ .status-box {
+     background: linear-gradient(135deg, #e8f4f8, #f0f8ff) !important;
+     border: 2px solid #1976d2 !important;
+     border-radius: 15px !important;
+     padding: 1rem !important;
+     color: #1976d2 !important;
+     font-weight: 500 !important;
+     text-align: center !important;
+     font-size: 0.95rem !important;
  }

  .examples {
+     margin-top: 2rem !important;
  }

  .example {
+     border-radius: 25px !important;
+     background: linear-gradient(135deg, var(--quasar-primary) 0%, var(--quasar-secondary) 100%) !important;
      color: white !important;
      border: none !important;
      transition: all 0.3s ease !important;
      font-weight: 500 !important;
+     padding: 0.8rem 1.5rem !important;
+     font-size: 0.95rem !important;
  }

  .example:hover {
+     transform: translateY(-3px) !important;
+     box-shadow: 0 10px 25px rgba(102, 126, 234, 0.3) !important;
  }

+ .feature-badge {
+     display: inline-block !important;
+     background: var(--quasar-accent) !important;
+     color: white !important;
+     padding: 0.3rem 0.8rem !important;
      border-radius: 20px !important;
+     font-size: 0.8rem !important;
+     font-weight: 600 !important;
+     margin: 0 0.3rem !important;
  }
  """

+ # Create the Quasar LLM interface
  with gr.Blocks(
      theme=gr.themes.Soft(
          primary_hue="blue",
+         secondary_hue="slate",
          neutral_hue="slate",
          font=gr.themes.GoogleFont("Inter")
      ),

@@ -193,137 +227,144 @@ with gr.Blocks(
      title="Quasar LLM Audio Chat"
  ) as app:

+     # Header
      gr.HTML("""
      <div style="text-align: center; margin-bottom: 2rem;">
+         <h1>✨ Quasar LLM</h1>
+         <p class="subtitle">
+             🎤 <span class="feature-badge">VOICE</span>
+             💬 <span class="feature-badge">TEXT</span>
+             🌐 <span class="feature-badge">WEB SEARCH</span><br>
+             Real-time AI-powered conversations with web intelligence
          </p>
      </div>
      """)

+     # Main chat interface
+     chatbot = gr.Chatbot(
+         value=[],
+         height=450,
+         show_label=False,
+         container=True,
+         bubble_full_width=False,
+         render_markdown=True,
+         type="messages",
+         elem_classes=["chatbot"]
+     )
+
+     # Audio Input Section
+     with gr.Group(elem_classes=["audio-section"]):
+         gr.HTML("<h3 style='text-align: center; color: #ff6b6b; margin-bottom: 1rem;'>🎤 Voice Input</h3>")
+
+         with gr.Row():
+             with gr.Column(scale=4):
+                 audio_input = gr.Audio(
+                     sources=["microphone"],
+                     type="filepath",
+                     label="Record your question",
+                     show_label=False,
+                     container=False,
+                     elem_classes=["audio-input"]
+                 )

+             with gr.Column(scale=1, min_width=150):
+                 audio_submit = gr.Button(
+                     "🎤 Ask Voice",
+                     variant="primary",
+                     size="lg",
+                     elem_classes=["voice-button"]
+                 )
+
+     # Text Input Section
+     with gr.Group(elem_classes=["text-section"]):
+         gr.HTML("<h3 style='text-align: center; color: #667eea; margin-bottom: 1rem;'>💬 Text Input</h3>")
+
+         with gr.Row():
+             with gr.Column(scale=5):
                  text_input = gr.Textbox(
+                     placeholder="Type your question here or use voice input above...",
                      container=False,
                      show_label=False,
                      lines=1,
                      max_lines=3,
                      elem_classes=["text-input"]
                  )

+             with gr.Column(scale=1, min_width=100):
+                 text_submit = gr.Button(
+                     "Send",
+                     variant="secondary",
+                     elem_classes=["send-button"]
+                 )
+
+     # Status Display
+     status = gr.Textbox(
+         value="🚀 Ready for your questions - Use voice or text input!",
+         label="Status",
+         interactive=False,
+         elem_classes=["status-box"],
+         show_label=False
+     )

+     # Example Questions
      gr.Examples(
          examples=quasar_examples,
          inputs=[text_input],
+         label="✨ Try these example questions:",
          elem_classes=["examples"]
      )

      # Audio processing function
+     def handle_audio_input(audio_file, history):
+         if audio_file is None:
+             return history, "Please record audio first 🎤"

+         # For now, we'll add a placeholder message since we don't have speech-to-text
+         # In a real deployment, you'd integrate with a speech-to-text service
+         user_message = "🎤 [Voice message received - Speech-to-text would process this]"
+         history.append({"role": "user", "content": user_message})
+
+         # Add a helpful response
+         response = "I received your voice message! However, speech-to-text processing requires additional services. Please use the text input below for now, or integrate with a cloud speech service like Google Speech-to-Text or OpenAI Whisper."
+         history.append({"role": "assistant", "content": response})
+
+         return history, "Voice message received (text processing recommended)"

      # Text processing function
+     def handle_text_input(text, history):
          if not text.strip():
+             return history, "", "Please enter a message 📝"

+         # Add user message
          history.append({"role": "user", "content": text})

          try:
+             # Get response from webSearch
              response = webSearch(text)
              history.append({"role": "assistant", "content": response})

+             return history, "", f"✅ Question answered: {text[:40]}..."

          except Exception as e:
+             error_msg = f"🔍 Search temporarily unavailable: {str(e)}"
              history.append({"role": "assistant", "content": error_msg})
+             return history, "", f"❌ Error occurred"

      # Event handlers
      audio_submit.click(
+         handle_audio_input,
          inputs=[audio_input, chatbot],
          outputs=[chatbot, status]
      )

      text_submit.click(
+         handle_text_input,
          inputs=[text_input, chatbot],
          outputs=[chatbot, text_input, status]
      )

      text_input.submit(
+         handle_text_input,
          inputs=[text_input, chatbot],
          outputs=[chatbot, text_input, status]
      )
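
The new handle_audio_input only posts a placeholder; its own comments point at a real speech-to-text service such as Google Speech-to-Text or OpenAI Whisper. Below is a minimal sketch of that integration using the open-source openai-whisper package, keeping the (history, status) return shape that audio_submit.click expects; the "base" model size and the reuse of webSearch on the transcript are assumptions, not part of this commit.

import whisper  # assumption: pip install openai-whisper (also needs ffmpeg)

# Assumption: the small "base" checkpoint; larger models transcribe better but load slower.
stt_model = whisper.load_model("base")

def handle_audio_input(audio_file, history):
    if audio_file is None:
        return history, "Please record audio first 🎤"

    # gr.Audio(type="filepath") passes the recording as a temp-file path,
    # which model.transcribe() accepts directly.
    user_text = stt_model.transcribe(audio_file)["text"].strip()
    if not user_text:
        return history, "Could not understand audio"

    history.append({"role": "user", "content": f"🎤 {user_text}"})
    try:
        # Route the transcript through the same search path as typed input.
        response = webSearch(user_text)
        history.append({"role": "assistant", "content": response})
        return history, f"✅ Question answered: {user_text[:40]}..."
    except Exception as e:
        history.append({"role": "assistant", "content": f"Search error: {str(e)}"})
        return history, "❌ Error occurred"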
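
The commit also drops the pyttsx3 text_to_speech path. On a hosted Space that removal makes sense: pyttsx3 plays audio on the server's own sound device, so visitors would never hear it. If spoken replies are still wanted, one sketch, assuming a gTTS dependency and a hypothetical reply_audio output component (neither is in this commit), is to synthesize the reply to a file and hand the path to a gr.Audio output.

import tempfile

from gtts import gTTS  # assumption: Google's TTS endpoint, needs outbound network access

def speak(text):
    """Render text to an MP3 and return its path for a gr.Audio output."""
    # Same markdown stripping the removed text_to_speech did before speaking.
    clean = text.replace('*', '').replace('#', '').replace('`', '')
    out = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    gTTS(clean).save(out.name)
    return out.name

# Hypothetical wiring: add reply_audio = gr.Audio(label="🔊 Reply", autoplay=True)
# inside the Blocks, list it in each handler's outputs, and have
# handle_text_input also return speak(response).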