import modal

app = modal.App("whisper-agentic")

# Define the Modal image environment: ffmpeg for audio decoding plus the
# Python dependencies for Whisper and the Gradio UI.
image = (
    modal.Image.debian_slim()
    .apt_install("ffmpeg")
    .pip_install("transformers", "torch", "gradio", "torchaudio", "accelerate", "optimum[diffusers]", "gradio_client")
)
@app.function(image=image, gpu="A10G", timeout=7200)
def run_gradio():
    from transformers import pipeline
    import gradio as gr

    # Load the Whisper pipeline on the provisioned A10G GPU
    # (without an explicit device it would run on CPU).
    transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True, device="cuda")
    # Agentic function
    def transcribe_and_analyze(audio):
        result = transcriber(audio)
        transcript = result["text"]
        word_count = len(transcript.split())
        char_count = len(transcript.replace(" ", ""))
        return transcript, word_count, char_count
    # Gradio UI
    with gr.Blocks(title="Whisper Agentic Transcriber") as demo:
        gr.Markdown("## πŸ€– Whisper + Agentic Analysis")
        audio = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record Audio")
        btn = gr.Button("Transcribe and Analyze")
        transcript = gr.Textbox(label="πŸ“ Transcript", lines=4)
        word_count = gr.Number(label="πŸ“ Word Count")
        char_count = gr.Number(label="πŸ”‘ Character Count")
        btn.click(fn=transcribe_and_analyze, inputs=audio, outputs=[transcript, word_count, char_count])

    demo.launch(server_name="0.0.0.0", server_port=7860, share=True, mcp_server=True)
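
# A minimal local entrypoint, added as a sketch (not in the original file): it
# lets `modal run app.py` start the remote Gradio server. The name `main` is an
# assumption; any Modal local entrypoint that calls run_gradio.remote() works.
@app.local_entrypoint()
def main():
    run_gradio.remote()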