import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the pretrained grammar error correction model and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("prithivida/grammar_error_correcter_v1")
model = AutoModelForSeq2SeqLM.from_pretrained("prithivida/grammar_error_correcter_v1")

# Run inference on the GPU when one is available; otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def correct_grammar(text):
    # Tokenize the input, truncating anything longer than 1024 tokens, and move
    # the tensors to the same device as the model.
    inputs = tokenizer([text], return_tensors="pt", padding=True, truncation=True, max_length=1024).to(device)

    # Generate a correction with beam search.
    outputs = model.generate(**inputs, max_length=1024, num_beams=5, early_stopping=True)

    # Decode the best sequence back into plain text, dropping special tokens.
    corrected_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return corrected_text
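
# Illustrative call (the actual output depends on the model):
#   correct_grammar("He are moving here.")  ->  "He is moving here."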


def correct_grammar_interface(text):
    # Thin wrapper used as the Gradio click callback.
    corrected_text = correct_grammar(text)
    return corrected_text


with gr.Blocks() as grammar_app:
    gr.Markdown("<h1>Grammar Correction App (up to 300 words)</h1>")

    with gr.Row():
        input_box = gr.Textbox(label="Input Text", placeholder="Enter text (up to 300 words)", lines=10)
        output_box = gr.Textbox(label="Corrected Text", placeholder="Corrected text will appear here", lines=10)

    submit_button = gr.Button("Correct Grammar")

    # Route the input textbox through the correction function into the output textbox.
    submit_button.click(fn=correct_grammar_interface, inputs=input_box, outputs=output_box)
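
    # Note: the "up to 300 words" limit in the labels is advisory only; this script
    # does not enforce it, and over-long inputs are truncated by the tokenizer.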


if __name__ == "__main__":
    grammar_app.launch()
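
# To run locally (assuming this script is saved as app.py and gradio, torch, and
# transformers are installed): run python app.py, then open the local URL Gradio
# prints (http://127.0.0.1:7860 by default).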