aiqcamp committed on
Commit
d4ddaca
·
verified ·
1 Parent(s): 8fc65b7

Delete app-backup.py

Browse files
Files changed (1) hide show
  1. app-backup.py +0 -48
app-backup.py DELETED
@@ -1,48 +0,0 @@
1
import gradio as gr
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer

# Speech-to-text: Whisper large-v3 transcribes the uploaded/recorded audio clip.
# NOTE(review): loaded eagerly at import time — startup is slow and needs
# enough RAM/VRAM for all three models; confirm that is acceptable for deployment.
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")

# Grammar acceptability scoring: RoBERTa fine-tuned on CoLA
# (Corpus of Linguistic Acceptability), wrapped as a text-classification pipeline.
cola_model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-CoLA")
cola_tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-CoLA")
grammar_pipeline = pipeline("text-classification", model=cola_model, tokenizer=cola_tokenizer)

# Grammar correction: T5 seq2seq model that rewrites the transcription.
correction_pipeline = pipeline("text2text-generation", model="vennify/t5-base-grammar-correction")
16
def process_audio(audio):
    """Transcribe an audio clip, score its grammar, and suggest a correction.

    Parameters
    ----------
    audio : str | None
        Filepath of the recorded/uploaded clip (Gradio ``type="filepath"``),
        or ``None`` when the user submitted nothing.

    Returns
    -------
    tuple[str, str, str]
        ``(transcription, "LABEL (confidence)", corrected_text)``. The score
        and correction fields are empty strings when there is no usable audio.
    """
    if audio is None:
        return "No audio provided.", "", ""

    # Step 1: Transcription. Whisper returns {"text": ...}; strip because the
    # decoded text commonly carries leading/trailing whitespace.
    transcription = asr_pipeline(audio)["text"].strip()

    # Guard: a silent or unintelligible clip yields empty text — scoring or
    # "correcting" an empty string would produce meaningless output.
    if not transcription:
        return "No speech detected.", "", ""

    # Step 2: Grammar scoring via the CoLA classifier (label + confidence).
    score_output = grammar_pipeline(transcription)[0]
    label = score_output["label"]
    confidence = score_output["score"]

    # Step 3: Grammar correction with the T5 model; cap generation length.
    corrected = correction_pipeline(transcription, max_length=128)[0]["generated_text"]

    return transcription, f"{label} ({confidence:.2f})", corrected
33
# Gradio UI wiring: one audio input mapped to three text outputs.
audio_input = gr.Audio(
    sources=["microphone", "upload"],
    type="filepath",
    label="🎤 Speak or Upload Audio (.wav)",
)
text_outputs = [
    gr.Textbox(label="📝 Transcription"),
    gr.Textbox(label="✅ Grammar Score"),
    gr.Textbox(label="✍️ Grammar Correction"),
]

demo = gr.Interface(
    fn=process_audio,
    inputs=audio_input,
    outputs=text_outputs,
    title="🎙️ Voice Grammar Scorer",
    description="Record or upload a WAV file. This app transcribes your voice, scores its grammar, and suggests corrections.",
)

# Launch only when executed as a script (Spaces/hosts may import instead).
if __name__ == "__main__":
    demo.launch()