Shreyas094 committed on
Commit
10d7fff
·
verified ·
1 Parent(s): 8df21a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -10
app.py CHANGED
@@ -17,7 +17,10 @@ from huggingface_hub import InferenceClient
17
  import inspect
18
  import logging
19
  import shutil
 
 
20
 
 
21
 
22
  # Set up basic configuration for logging
23
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -614,15 +617,21 @@ Write a detailed and complete response that answers the following user question:
614
 
615
  logging.info("Finished generating response")
616
 
617
def transcribe(audio_file):
    """Transcribe a recorded audio file to text via the Whisper API.

    Returns the transcribed text, or an empty string when no audio file
    was supplied by the microphone component.
    """
    if audio_file is None:
        return ""
    # Read the raw audio bytes and hand them to the Whisper endpoint.
    with open(audio_file, "rb") as audio_handle:
        raw_bytes = audio_handle.read()
    return whisper_api(raw_bytes)["text"]
 
 
 
 
 
 
626
 
627
  def vote(data: gr.LikeData):
628
  if data.liked:
@@ -684,6 +693,7 @@ with gr.Blocks() as demo:
684
  with gr.Column(scale=1):
685
  audio_input = gr.Audio(sources="microphone", type="filepath", label="Speak your query")
686
  transcribe_button = gr.Button("Transcribe")
 
687
 
688
  with gr.Column(scale=2):
689
  chatbot = gr.Chatbot(
@@ -718,11 +728,12 @@ with gr.Blocks() as demo:
718
  update_output = gr.Textbox(label="Update Status")
719
  delete_button = gr.Button("Delete Selected Documents")
720
 
721
- # Connect components
722
  transcribe_button.click(
723
- transcribe,
724
  inputs=[audio_input],
725
- outputs=[query_textbox]
 
726
  )
727
 
728
  submit_button.click(
 
17
  import inspect
18
  import logging
19
  import shutil
20
+ import asyncio
21
+ from concurrent.futures import ThreadPoolExecutor
22
 
23
+ executor = ThreadPoolExecutor()
24
 
25
  # Set up basic configuration for logging
26
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
617
 
618
  logging.info("Finished generating response")
619
 
620
async def transcribe_async(audio_file):
    """Asynchronously transcribe a recorded audio file via the Whisper API.

    The blocking file read and API call run in the module-level thread-pool
    ``executor`` so the Gradio event loop is not blocked while Whisper works.

    Returns:
        A ``(text, status_message)`` tuple. On success ``text`` is the
        transcription; on failure ``text`` is ``""`` and the status message
        describes the error.
    """
    if audio_file is None:
        return "", "No audio file provided"

    try:
        def process_audio():
            # Blocking work: read the raw audio bytes and call Whisper.
            with open(audio_file, "rb") as f:
                audio_data = f.read()
            return whisper_api(audio_data)["text"]

        # FIX: asyncio.get_event_loop() is deprecated inside a coroutine
        # since Python 3.10; get_running_loop() is the correct call here.
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(executor, process_audio)
        return response, "Transcription completed successfully"
    except Exception as e:
        # Surface any failure (missing file, API error) to the UI status box
        # instead of letting it propagate into Gradio's handler machinery.
        return "", f"Error during transcription: {str(e)}"
635
 
636
  def vote(data: gr.LikeData):
637
  if data.liked:
 
693
  with gr.Column(scale=1):
694
  audio_input = gr.Audio(sources="microphone", type="filepath", label="Speak your query")
695
  transcribe_button = gr.Button("Transcribe")
696
+ transcription_status = gr.Textbox(label="Transcription Status")
697
 
698
  with gr.Column(scale=2):
699
  chatbot = gr.Chatbot(
 
728
  update_output = gr.Textbox(label="Update Status")
729
  delete_button = gr.Button("Delete Selected Documents")
730
 
731
+ # Update the click event
732
  transcribe_button.click(
733
+ transcribe_async,
734
  inputs=[audio_input],
735
+ outputs=[query_textbox, transcription_status],
736
+ _js="startTranscription" # Add a JavaScript function to show a loading indicator
737
  )
738
 
739
  submit_button.click(