Pijush2023 committed on
Commit
28374c4
·
verified ·
1 Parent(s): a51d4fa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -26
app.py CHANGED
@@ -325,7 +325,6 @@ def clear_transcription_state():
325
 
326
 
327
 
328
- # Create the Gradio Blocks interface
329
  with gr.Blocks(theme="rawrsor1/Everforest") as demo:
330
  chatbot = gr.Chatbot([], elem_id="RADAR", bubble_full_width=False)
331
  with gr.Row():
@@ -336,13 +335,13 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
336
  value="Normal Chatbot"
337
  )
338
  with gr.Row():
339
- question_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
340
- audio_input = gr.Audio(sources=["microphone"],streaming=True,type='numpy',every=0.1,label="Speak to Ask")
341
- submit_voice_btn = gr.Button("Submit Voice")
342
-
343
-
344
  with gr.Column():
345
- audio_output = gr.Audio(label="Audio", type="filepath",autoplay=True,interactive=False)
 
 
 
 
 
346
 
347
  with gr.Row():
348
  with gr.Column():
@@ -357,18 +356,9 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
357
  with gr.Row():
358
  with gr.Column():
359
  gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
360
- gr.Examples(examples=examples, fn=insert_prompt, inputs=question_input, outputs=question_input,api_name="api_insert_example")
361
-
362
- # Define interactions
363
- # Define interactions for clicking the button
364
- #get_response_btn.click(fn=add_message, inputs=[chatbot, question_input], outputs=[chatbot, question_input],api_name="api_add_message_on_button_click")\
365
- # .then(fn=chat_with_bot, inputs=[chatbot], outputs=chatbot,api_name="api_get response_on_button")
366
-
367
-
368
- # Define interaction for hitting the Enter key
369
- #question_input.submit(fn=add_message, inputs=[chatbot, question_input], outputs=[chatbot, question_input],api_name="api_add_message_on _enter")\
370
- #.then(fn=chat_with_bot, inputs=[chatbot], outputs=chatbot,api_name="api_get response_on_enter")
371
 
 
372
  get_response_btn.click(
373
  fn=handle_mode_selection,
374
  inputs=[mode_selection, chatbot, question_input],
@@ -392,16 +382,33 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
392
 
393
  # Speech-to-Text functionality
394
  state = gr.State()
395
- audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, question_input],api_name="api_voice_to_text")
 
 
 
 
 
396
 
 
 
 
 
 
 
397
 
398
- generate_audio_btn.click(fn=generate_audio_from_last_response, inputs=chatbot, outputs=audio_output,api_name="api_generate_text_to_audio")
399
- clean_btn.click(fn=clear_fields, inputs=[], outputs=[chatbot, question_input, audio_output],api_name="api_clear_textbox")
400
-
401
-
402
- # Clear state interaction
403
- clear_state_btn.click(fn=clear_transcription_state, outputs=[question_input, state],api_name="api_clean_state_transcription")
404
 
 
 
 
 
 
 
405
 
406
  # Launch the Gradio interface
407
- demo.launch(show_error=True)
 
325
 
326
 
327
 
 
328
  with gr.Blocks(theme="rawrsor1/Everforest") as demo:
329
  chatbot = gr.Chatbot([], elem_id="RADAR", bubble_full_width=False)
330
  with gr.Row():
 
335
  value="Normal Chatbot"
336
  )
337
  with gr.Row():
 
 
 
 
 
338
  with gr.Column():
339
+ question_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
340
+ audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1, label="Speak to Ask")
341
+ submit_voice_btn = gr.Button("Submit Voice")
342
+
343
+ with gr.Column():
344
+ audio_output = gr.Audio(label="Audio", type="filepath", autoplay=True, interactive=False)
345
 
346
  with gr.Row():
347
  with gr.Column():
 
356
  with gr.Row():
357
  with gr.Column():
358
  gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
359
+ gr.Examples(examples=examples, fn=insert_prompt, inputs=question_input, outputs=question_input, api_name="api_insert_example")
 
 
 
 
 
 
 
 
 
 
360
 
361
+ # Define interactions for the Get Response button
362
  get_response_btn.click(
363
  fn=handle_mode_selection,
364
  inputs=[mode_selection, chatbot, question_input],
 
382
 
383
  # Speech-to-Text functionality
384
  state = gr.State()
385
+ audio_input.stream(
386
+ transcribe_function,
387
+ inputs=[state, audio_input],
388
+ outputs=[state, question_input],
389
+ api_name="api_voice_to_text"
390
+ )
391
 
392
+ generate_audio_btn.click(
393
+ fn=generate_audio_from_last_response,
394
+ inputs=chatbot,
395
+ outputs=audio_output,
396
+ api_name="api_generate_text_to_audio"
397
+ )
398
 
399
+ clean_btn.click(
400
+ fn=clear_fields,
401
+ inputs=[],
402
+ outputs=[chatbot, question_input, audio_output],
403
+ api_name="api_clear_textbox"
404
+ )
405
 
406
+ # Clear state interaction
407
+ clear_state_btn.click(
408
+ fn=clear_transcription_state,
409
+ outputs=[question_input, state],
410
+ api_name="api_clean_state_transcription"
411
+ )
412
 
413
  # Launch the Gradio interface
414
+ demo.launch(show_error=True)