# Whisper-Demo / app.py
# Author: DarwinAnim8or (commit 47e7707)
import gradio as gr
from transformers import pipeline
# Load the Whisper ASR model into a transformers pipeline.
model_id = "openai/whisper-small"  # Hugging Face model hub identifier
device = "cpu"  # CPU inference; no GPU assumed on the hosting Space
BATCH_SIZE = 8  # batch size passed to the pipeline at call time
pipe = pipeline(
    task="automatic-speech-recognition",
    model=model_id,
    chunk_length_s=30,  # split long audio into 30 s chunks for chunked inference
    device=device,
)
def transcribe(inputs, task):
    """Run the Whisper ASR pipeline on an audio input.

    Args:
        inputs: Filepath (or pipeline-compatible audio) to transcribe.
        task: Whisper generation task, e.g. "transcribe" or "translate".

    Returns:
        The transcription text produced by the pipeline.

    Raises:
        gr.Error: If no audio input was supplied.
    """
    # Guard: surface a user-facing error instead of crashing the pipeline.
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    result = pipe(
        inputs,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )
    return result["text"]
def transcribelocal(microphone, file_upload):
    """Transcribe whichever audio source was provided.

    Prefers the microphone recording when both inputs are present;
    falls back to the uploaded file otherwise.
    """
    audio = microphone if microphone is not None else file_upload
    return transcribe(audio, "transcribe")
# Create a Gradio interface with two input modes: realtime (microphone) and
# file upload. NOTE(review): migrated off the `gr.inputs` / `gr.outputs`
# namespaces, which were deprecated in Gradio 3.x and removed in 4.x; the
# current `gr.Audio` component takes a `sources` list instead of a `source`
# string.
iface = gr.Interface(
    fn=transcribelocal,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath", label="Realtime Mode"),
        gr.Audio(sources=["upload"], type="filepath", label="File Upload Mode"),
    ],
    outputs=[
        gr.Textbox(label="Transcription"),
    ],
    title="Whisper Transcription App",
    description="A Gradio app that uses OpenAI's whisper model to transcribe audio",
)

# Launch only when executed as a script (Spaces runs `python app.py`),
# so importing this module elsewhere does not start a server.
if __name__ == "__main__":
    iface.launch()