|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
|
|
|
|
# Hugging Face checkpoint used for grammar correction.
# NOTE: the original value, "microsoft/deberta-v3-base", is an ENCODER-ONLY
# model: it has no seq2seq head, so AutoModelForSeq2SeqLM.from_pretrained
# raises a ValueError and it cannot generate text at all. A T5-based
# grammar-correction checkpoint is a working drop-in replacement.
model_name = "vennify/t5-base-grammar-correction"

# Load the tokenizer and model once at import time so every request reuses
# the same weights instead of reloading them per call.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.eval()  # inference only: disables dropout for deterministic output
|
|
|
|
|
def correct_grammar(text: str) -> str:
    """Return a grammar-corrected version of *text*.

    Args:
        text: Raw sentence or paragraph entered in the UI.

    Returns:
        The model's corrected text with special tokens stripped, or an
        empty string when the input is empty/whitespace.
    """
    # Guard: don't run the model on empty input.
    if not text or not text.strip():
        return ""

    # truncation=True prevents a runtime error on inputs longer than the
    # model's maximum sequence length; using the tokenizer's __call__ (not
    # .encode) also yields the attention mask, which generate() should see.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

    # Inference only — no gradients needed, saves memory and time.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=512,
            num_beams=5,
            early_stopping=True,
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
# Gradio UI: one text box in, corrected text out.
# The description is kept model-agnostic — the original hard-coded "DeBERTa",
# which does not match what a seq2seq grammar-correction pipeline does.
interface = gr.Interface(
    fn=correct_grammar,
    inputs="text",
    outputs="text",
    title="Grammar Correction",
    description="Enter a sentence or paragraph to receive grammar corrections.",
)

# Launch the web server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()
|
|