codefusser committed on
Commit
9bd26c1
·
1 Parent(s): 33009e4

transcriber word count app file

Files changed (1)
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
+ import modal
+
+ app = modal.App("whisper-agentic")
+
+ # Define the Modal image environment: Debian slim plus ffmpeg for audio decoding
+ image = (
+     modal.Image.debian_slim().apt_install("ffmpeg").pip_install("transformers", "torch", "gradio", "torchaudio", "accelerate", "optimum[diffusers]", "gradio_client")
+ )
+
+ # Run the UI remotely on an A10G GPU with a two-hour timeout
+ @app.function(image=image, gpu="A10G", timeout=7200)
+ def run_gradio():
+     from transformers import pipeline
+     import gradio as gr
+
+     # Load the Whisper pipeline on the GPU (device=0); return_timestamps lets it handle clips longer than 30 seconds
+     transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2", return_timestamps=True, device=0)
+
+     # Agentic function: transcribe, then compute simple text statistics
+     def transcribe_and_analyze(audio):
+         result = transcriber(audio)
+         transcript = result["text"]
+
+         word_count = len(transcript.split())
+         char_count = len(transcript.replace(" ", ""))
+
+         # Return a tuple so each value maps onto one of the three output
+         # components wired up in btn.click below (a dict keyed by display
+         # names would not match the output components)
+         return transcript, word_count, char_count
+
+     # Gradio UI
+     with gr.Blocks(title="Whisper Agentic Transcriber") as demo:
+         gr.Markdown("## 🤖 Whisper + Agentic Analysis")
+         audio = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record Audio")
+         btn = gr.Button("Transcribe and Analyze")
+         transcript = gr.Textbox(label="📝 Transcript", lines=4)
+         word_count = gr.Number(label="📏 Word Count")
+         char_count = gr.Number(label="🔑 Character Count")
+
+         btn.click(fn=transcribe_and_analyze, inputs=audio, outputs=[transcript, word_count, char_count])
+
+     demo.launch(server_name="0.0.0.0", server_port=7860, share=True)