import gradio as gr
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer

# Speech-to-text: Whisper large-v3 transcribes the recorded or uploaded audio.
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
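# Note: Whisper transcribes in ~30-second windows; passing chunk_length_s=30 to the pipeline
# can help with longer clips, and device=0 at construction moves inference onto a GPU if available.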

# Grammatical-acceptability scoring: a RoBERTa classifier fine-tuned on CoLA.
cola_model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-CoLA")
cola_tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-CoLA")
grammar_pipeline = pipeline("text-classification", model=cola_model, tokenizer=cola_tokenizer)
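# Note: this checkpoint may report only generic class names ("LABEL_0"/"LABEL_1");
# conventionally 1 means grammatically acceptable, but verify against the model config.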

# Grammar correction: a T5 model that rewrites the transcription.
correction_pipeline = pipeline("text2text-generation", model="vennify/t5-base-grammar-correction")
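# The model card for this checkpoint recommends a "grammar: " task prefix; if corrections
# come back unchanged, try prepending "grammar: " to the input text in process_audio() below.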


def process_audio(audio):
    """Transcribe an audio file, score its grammar, and return a corrected version."""
    if audio is None:
        return "No audio provided.", "", ""

    # Transcribe the audio (Gradio passes a filepath because type="filepath").
    transcription = asr_pipeline(audio)["text"]

    # Score the grammatical acceptability of the transcription.
    score_output = grammar_pipeline(transcription)[0]
    label = score_output["label"]
    confidence = score_output["score"]

    # Generate a corrected version of the transcription.
    corrected = correction_pipeline(transcription, max_length=128)[0]["generated_text"]

    return transcription, f"{label} ({confidence:.2f})", corrected


demo = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath", label="🎤 Speak or Upload Audio (.wav)"),
    outputs=[
        gr.Textbox(label="📝 Transcription"),
        gr.Textbox(label="✅ Grammar Score"),
        gr.Textbox(label="✏️ Grammar Correction")
    ],
    title="🎙️ Voice Grammar Scorer",
    description="Record or upload a WAV file. This app transcribes your voice, scores its grammar, and suggests corrections.",
)

if __name__ == "__main__":
    demo.launch()
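    # Local-only by default; demo.launch(share=True) would also create a temporary public link.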