import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the T5-large diverse paraphrasing model and its tokenizer.
model = AutoModelForSeq2SeqLM.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")
tokenizer = AutoTokenizer.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")

# Use the GPU if one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
# Paraphrase the input text with diverse beam search and return the top candidate.
def generate_text(inp):
  text = "paraphrase: " + inp + " </s>"
  encoding = tokenizer(text, max_length=128, truncation=True, return_tensors="pt")
  input_ids = encoding["input_ids"].to(device)
  attention_mask = encoding["attention_mask"].to(device)
  # Diverse beam search: five beams split into five groups, with a diversity
  # penalty so the candidate paraphrases do not collapse onto the same wording.
  with torch.no_grad():
    diverse_beam_outputs = model.generate(
      input_ids=input_ids,
      attention_mask=attention_mask,
      max_length=128,
      early_stopping=True,
      num_beams=5,
      num_beam_groups=5,
      num_return_sequences=5,
      diversity_penalty=0.70)
  # Decode only the highest-ranked of the five returned sequences.
  sent = tokenizer.decode(diverse_beam_outputs[0], skip_special_tokens=True,
                          clean_up_tokenization_spaces=True)
  return sent
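
# A minimal sketch (not in the original script): generate() above produces
# num_return_sequences=5 candidates, but generate_text() keeps only the first.
# The helper below, generate_paraphrases (an assumed name), decodes all five
# with tokenizer.batch_decode in case the full candidate list is wanted.
def generate_paraphrases(inp):
  text = "paraphrase: " + inp + " </s>"
  encoding = tokenizer(text, max_length=128, truncation=True, return_tensors="pt")
  input_ids = encoding["input_ids"].to(device)
  attention_mask = encoding["attention_mask"].to(device)
  with torch.no_grad():
    outputs = model.generate(
      input_ids=input_ids, attention_mask=attention_mask,
      max_length=128, early_stopping=True,
      num_beams=5, num_beam_groups=5,
      num_return_sequences=5, diversity_penalty=0.70)
  # Decode every candidate, not just the best-scoring one.
  return tokenizer.batch_decode(outputs, skip_special_tokens=True,
                                clean_up_tokenization_spaces=True)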

title = "Paraphraser One"
description = "To paraphrase is to express the same meaning in different words. Write or paste text below and submit; the model will attempt to restate your meaning in different words."

output_text = gr.Textbox()
gr.Interface(fn=generate_text, inputs="textbox", outputs=output_text,
             title=title, description=description).launch(inline=False)