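"""Voice Grammar Scorer: a Gradio app that transcribes speech with Whisper,
scores grammatical acceptability with a RoBERTa model fine-tuned on CoLA,
and suggests a corrected sentence with a T5 grammar-correction model.

Requires gradio, transformers and torch; ffmpeg must be available so the
ASR pipeline can decode uploaded audio files.
"""
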
import gradio as gr
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer

# Load Whisper for ASR
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
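# Note: whisper-large-v3 is a large checkpoint; pass device=0 to pipeline() to run it
# on a GPU. On CPU-only machines a smaller checkpoint such as "openai/whisper-small"
# keeps load time and latency manageable.
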
# Load Grammar Scoring Model (CoLA)
cola_model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-CoLA")
cola_tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-CoLA")
grammar_pipeline = pipeline("text-classification", model=cola_model, tokenizer=cola_tokenizer)

# Load Grammar Correction Model (T5)
correction_pipeline = pipeline("text2text-generation", model="vennify/t5-base-grammar-correction")


def process_audio(audio):
    if audio is None:
        return "No audio provided.", "", ""

    # Step 1: Transcription (audio is the recording's file path, since type="filepath")
    transcription = asr_pipeline(audio)["text"]

    # Step 2: Grammar Scoring (CoLA convention: label 1 = acceptable, label 0 = unacceptable)
    score_output = grammar_pipeline(transcription)[0]
    label = score_output["label"]
    confidence = score_output["score"]

    # Step 3: Grammar Correction (the model card suggests prefixing the input with "grammar: ")
    corrected = correction_pipeline("grammar: " + transcription, max_length=128)[0]["generated_text"]

    return transcription, f"{label} ({confidence:.2f})", corrected


demo = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath", label="🎤 Speak or Upload Audio (.wav)"),
    outputs=[
        gr.Textbox(label="📝 Transcription"),
        gr.Textbox(label="✅ Grammar Score"),
        gr.Textbox(label="✍️ Grammar Correction"),
    ],
    title="🎙️ Voice Grammar Scorer",
    description="Record or upload a WAV file. This app transcribes your voice, scores its grammar, and suggests corrections.",
)


if __name__ == "__main__":
    demo.launch()