|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# Hugging Face checkpoint id for the original GPT-1 model.
model_name = "openai-community/openai-gpt"

# Tokenizer and model are loaded once at import time and reused for every
# request handled by the Gradio app below.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
|
|
def correct_grammar(text):
    """Return a grammar-corrected version of *text* using the GPT model.

    Args:
        text: Sentence or paragraph to correct.

    Returns:
        The model's continuation after the "Corrected:" cue, stripped of
        surrounding whitespace.
    """
    prompt = f"Correct the grammar of the following sentence:\n{text}\nCorrected: "

    inputs = tokenizer.encode(prompt, return_tensors="pt")

    # Pure inference — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            # max_new_tokens bounds only the *generated* text.  The previous
            # max_length=512 counted the prompt tokens as well, so a long
            # input could leave little or no room for the correction.
            max_new_tokens=128,
            num_beams=5,
            early_stopping=True,
        )

    # Decode only the newly generated tokens.  The old code decoded the full
    # sequence and then did corrected_text.replace(prompt, ""), which silently
    # failed: the GPT-1 tokenizer lowercases/renormalizes text on round-trip,
    # so the decoded output never contains the prompt verbatim and the prompt
    # leaked into the UI output.
    generated_tokens = outputs[0][inputs.shape[-1]:]
    corrected_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)

    return corrected_text.strip()
|
|
|
|
|
# Gradio UI: a single text box in, the corrected text out.
interface = gr.Interface(
    fn=correct_grammar,
    inputs="text",
    outputs="text",
    title="Grammar Correction with GPT",
    description="Enter a sentence or paragraph to receive grammar corrections using the OpenAI GPT model.",
)

if __name__ == "__main__":
    # Start the local Gradio server only when run as a script, so importing
    # this module (e.g. for tests) has no side effects beyond model loading.
    interface.launch()
|
|