Mark0047 committed on
Commit 7a94e97 · verified · 1 Parent(s): a1af285

Update app.py

Files changed (1)
  1. app.py +15 -17
app.py CHANGED
@@ -1,23 +1,21 @@
-# import gradio as gr
-
-# gr.load("models/openai/whisper-large-v3-turbo").launch()
-
-
 import gradio as gr
 
-# Define a function to process the output and extract only the transcription text
+# Define a processing function that directly returns the transcription
 def process_transcription(audio_input):
-    model = gr.Interface.load("models/openai/whisper-large-v3-turbo")
+    # Load the Whisper model and get the result
+    model = gr.load("models/openai/whisper-large-v3-turbo")  # Use gr.load in the correct way
     result = model(audio_input)
 
-    # Extract the transcription text directly
-    transcription = result["text"]
-    return transcription
+    # Return only the transcription text
+    return result["text"]
+
+# Use a Gradio interface to launch the app
+with gr.Blocks() as demo:
+    audio_input = gr.Audio(source="microphone", type="filepath", label="Input Audio")
+    transcription_output = gr.Textbox(label="Transcription")
+
+    # Add interaction
+    audio_input.change(process_transcription, inputs=audio_input, outputs=transcription_output)
 
-# Launch the interface
-gr.Interface(
-    fn=process_transcription,
-    inputs="audio",
-    outputs="text",
-    live=True,
-).launch()
+# Launch the app
+demo.launch()
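
For reference, a minimal sketch of the same app with the gr.load call hoisted to module level, so the model interface is created once at startup rather than on every callback. This is not part of the commit; it assumes Gradio 3.x (where gr.Audio takes source= and the object returned by gr.load can be called like a function, as the updated code above already relies on) and keeps the result["text"] access from the diff.

import gradio as gr

# Assumption: the object returned by gr.load() is callable, as the updated
# app.py relies on; loading it once here avoids re-creating it per request.
model = gr.load("models/openai/whisper-large-v3-turbo")

def process_transcription(audio_input):
    # audio_input is a filepath (type="filepath" on gr.Audio below).
    result = model(audio_input)
    # As in the commit, assume the result exposes the transcription under "text".
    return result["text"]

with gr.Blocks() as demo:
    # Gradio 3.x style; newer Gradio releases spell this sources=["microphone"].
    audio_input = gr.Audio(source="microphone", type="filepath", label="Input Audio")
    transcription_output = gr.Textbox(label="Transcription")
    audio_input.change(process_transcription, inputs=audio_input, outputs=transcription_output)

demo.launch()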