import modal

app = modal.App("whisper-agentic")

# Define the container image: ffmpeg for audio decoding, plus the Python deps
image = (
    modal.Image.debian_slim()
    .apt_install("ffmpeg")
    .pip_install(
        "transformers",
        "torch",
        "torchaudio",
        "accelerate",
        "optimum[diffusers]",
        "gradio",
        "gradio_client",
    )
)

@app.function(image=image, gpu="A10G", timeout=7200)
def run_gradio():
    import gradio as gr
    from transformers import pipeline

    # Load the Whisper ASR pipeline on the GPU; chunk_length_s lets it
    # transcribe audio longer than Whisper's 30-second context window.
    transcriber = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v2",
        chunk_length_s=30,
        return_timestamps=True,
        device="cuda",
    )

    # Agentic analysis step: transcribe, then derive simple text metrics
    def transcribe_and_analyze(audio):
        # Guard against the button being clicked with no recording
        if audio is None:
            return "", 0, 0

        result = transcriber(audio)
        transcript = result["text"]

        word_count = len(transcript.split())
        char_count = len(transcript.replace(" ", ""))

        # Return a tuple in the same order as the output components wired up
        # in btn.click() below (a dict keyed by strings would not map onto
        # the Gradio components)
        return transcript, word_count, char_count

    # Gradio UI
    with gr.Blocks(title="Whisper Agentic Transcriber") as demo:
        gr.Markdown("## πŸ€– Whisper + Agentic Analysis")
        audio = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record or Upload Audio")
        btn = gr.Button("Transcribe and Analyze")
        transcript = gr.Textbox(label="πŸ“ Transcript", lines=4)
        word_count = gr.Number(label="πŸ“ Word Count")
        char_count = gr.Number(label="πŸ”‘ Character Count")

        btn.click(fn=transcribe_and_analyze, inputs=audio, outputs=[transcript, word_count, char_count])

    # share=True prints a public *.gradio.live URL in the Modal logs,
    # since the container's port 7860 is not directly reachable
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
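
# A minimal entrypoint sketch so the app starts with `modal run app.py`
# (the filename is an assumption). The remote call blocks while the Gradio
# server is running; for a long-lived deployment, serving the Blocks app
# behind Modal's @modal.asgi_app() pattern would be the alternative.
@app.local_entrypoint()
def main():
    run_gradio.remote()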