uasername committed on
Commit
e8e8c34
·
verified ·
1 Parent(s): ef02b3a

Update Gradio_UI.py

Browse files
Files changed (1) hide show
  1. Gradio_UI.py +48 -48
Gradio_UI.py CHANGED
@@ -295,59 +295,59 @@ class GradioUI:
295
  )
296
 
297
  def launch(self, **kwargs):
298
- import gradio as gr
299
 
300
- # NEW: Add a helper function to extract audio file path from stored messages
301
- def extract_audio(messages):
302
- if not messages:
 
 
 
 
 
 
303
  return None
304
- last_message = messages[-1]
305
- if isinstance(last_message.content, dict):
306
- mime = last_message.content.get("mime_type", "")
307
- if mime.startswith("audio"):
308
- return last_message.content.get("path")
309
- return None
310
-
311
- with gr.Blocks(fill_height=True) as demo:
312
- stored_messages = gr.State([])
313
- file_uploads_log = gr.State([])
314
- chatbot = gr.Chatbot(
315
- label="Agent",
316
- type="messages",
317
- avatar_images=(
318
- None,
319
- "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
320
- ),
321
- resizeable=True,
322
- scale=1,
323
- )
324
-
325
- # NEW: Add a dedicated audio player component below the chatbot.
326
- audio_player = gr.Audio(label="Audio Pronunciation", type="filepath")
327
 
328
- if self.file_upload_folder is not None:
329
- upload_file = gr.File(label="Upload a file")
330
- upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
331
- upload_file.change(
332
- self.upload_file,
333
- [upload_file, file_uploads_log],
334
- [upload_status, file_uploads_log],
 
 
 
 
 
335
  )
336
- text_input = gr.Textbox(lines=1, label="Chat Message")
337
- # Chain the functions: log user message -> interact with agent -> extract audio for player
338
- text_input.submit(
339
- self.log_user_message,
340
- [text_input, file_uploads_log],
341
- [stored_messages, text_input],
342
- ).then(
343
- self.interact_with_agent, [stored_messages, chatbot], [chatbot]
344
- ).then(
345
- fn=extract_audio, inputs=[stored_messages], outputs=[audio_player]
346
- )
347
 
348
- # Optionally, you can arrange the components as you like, for example:
349
- # gr.Column([chatbot, audio_player, text_input])
350
- demo.launch(debug=True, share=True, **kwargs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
 
352
 
353
  __all__ = ["stream_to_gradio", "GradioUI"]
 
295
  )
296
 
297
  def launch(self, **kwargs):
298
+ import gradio as gr
299
 
300
+ # NEW: Add a helper function to extract audio file path from stored messages
301
+ def extract_audio(messages):
302
+ if not messages:
303
+ return None
304
+ last_message = messages[-1]
305
+ if isinstance(last_message.content, dict):
306
+ mime = last_message.content.get("mime_type", "")
307
+ if mime.startswith("audio"):
308
+ return last_message.content.get("path")
309
  return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
 
311
+ with gr.Blocks(fill_height=True) as demo:
312
+ stored_messages = gr.State([])
313
+ file_uploads_log = gr.State([])
314
+ chatbot = gr.Chatbot(
315
+ label="Agent",
316
+ type="messages",
317
+ avatar_images=(
318
+ None,
319
+ "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
320
+ ),
321
+ resizeable=True,
322
+ scale=1,
323
  )
 
 
 
 
 
 
 
 
 
 
 
324
 
325
+ # NEW: Add a dedicated audio player component below the chatbot.
326
+ audio_player = gr.Audio(label="Audio Pronunciation", type="filepath")
327
+
328
+ if self.file_upload_folder is not None:
329
+ upload_file = gr.File(label="Upload a file")
330
+ upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
331
+ upload_file.change(
332
+ self.upload_file,
333
+ [upload_file, file_uploads_log],
334
+ [upload_status, file_uploads_log],
335
+ )
336
+ text_input = gr.Textbox(lines=1, label="Chat Message")
337
+ # Chain the functions: log user message -> interact with agent -> extract audio for player
338
+ text_input.submit(
339
+ self.log_user_message,
340
+ [text_input, file_uploads_log],
341
+ [stored_messages, text_input],
342
+ ).then(
343
+ self.interact_with_agent, [stored_messages, chatbot], [chatbot]
344
+ ).then(
345
+ fn=extract_audio, inputs=[stored_messages], outputs=[audio_player]
346
+ )
347
+
348
+ # Optionally, you can arrange the components as you like, for example:
349
+ # gr.Column([chatbot, audio_player, text_input])
350
+ demo.launch(debug=True, share=True, **kwargs)
351
 
352
 
353
  __all__ = ["stream_to_gradio", "GradioUI"]