# whisper_gradio / app.py
from transformers import pipeline
import torch
import gradio as gr

# Use the first GPU if one is available, otherwise fall back to CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Whisper ASR pipeline; chunk_length_s=30 lets it transcribe audio longer
# than Whisper's 30-second context window by processing it in chunks
pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small.en",
    chunk_length_s=30,
    device=device,
)
# Transcribe a recorded audio file and return the text
def transcribe(audio):
    if audio is None:
        return "No audio provided. Please record something first."
    return pipe(audio)["text"]
# Create the interface: microphone input, a transcribe button, and a text output
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
            submit_button = gr.Button("Transcribe")
            text_output = gr.Textbox(label="Transcription")
    submit_button.click(fn=transcribe, inputs=audio_input, outputs=text_output)

demo.launch(share=True)
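
# Note (not part of the original app): a minimal sketch of how the same pipeline
# could be used outside Gradio, assuming a local audio file at the hypothetical
# path "sample.wav". return_timestamps=True asks the ASR pipeline to also return
# per-segment timestamps alongside the transcript.
#
#     result = pipe("sample.wav", return_timestamps=True)
#     print(result["text"])    # full transcript
#     print(result["chunks"])  # list of {"timestamp": (start, end), "text": ...}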