"""Gradio web app that corrects grammar with a pretrained seq2seq model."""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Model card: https://huggingface.co/prithivida/grammar_error_correcter_v1
MODEL_NAME = "prithivida/grammar_error_correcter_v1"

# Load the tokenizer and model once at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

# Use GPU if available; this app only does inference, so switch to eval mode.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()


def correct_grammar(text: str) -> str:
    """Return *text* with grammar errors corrected by the seq2seq model.

    Args:
        text: Raw input sentence or paragraph. It is truncated to the
            tokenizer's maximum input length during tokenization.

    Returns:
        The model's best beam-search correction, decoded to a plain string.
    """
    inputs = tokenizer(
        [text], return_tensors="pt", padding=True, truncation=True
    ).to(device)
    # no_grad(): generation needs no autograd graph; saves memory and time.
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_length=512, num_beams=5, early_stopping=True
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


def correct_grammar_interface(text: str) -> str:
    """Gradio callback: thin pass-through to :func:`correct_grammar`."""
    return correct_grammar(text)


# Gradio app interface.
with gr.Blocks() as grammar_app:
    # NOTE(review): the original Markdown string was whitespace-mangled;
    # restored as a proper H1 heading.
    gr.Markdown("# Grammar Correction App")
    with gr.Row():
        input_box = gr.Textbox(
            label="Input Text",
            placeholder="Enter text to be corrected",
            lines=4,
        )
        output_box = gr.Textbox(
            label="Corrected Text",
            placeholder="Corrected text will appear here",
            lines=4,
        )
    submit_button = gr.Button("Correct Grammar")
    # Bind the button click to the grammar correction function.
    submit_button.click(
        fn=correct_grammar_interface, inputs=input_box, outputs=output_box
    )


if __name__ == "__main__":
    grammar_app.launch()