import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Use the GPU when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("NoaiGPT/777")
model = AutoModelForSeq2SeqLM.from_pretrained("NoaiGPT/777").to(device)


def generate_title(text):
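    """Generate several diverse paraphrases of the input text to use as titles."""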
    input_ids = tokenizer(
        f"paraphraser: {text}",
        return_tensors="pt",
        padding="longest",
        truncation=True,
        max_length=64,
    ).input_ids.to(device)
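    # Diverse beam search: 4 beam groups with a strong diversity penalty return
    # 4 noticeably different candidates; no sampling is used, so decoding is deterministic.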
    outputs = model.generate(
        input_ids,
        num_beams=4,
        num_beam_groups=4,
        num_return_sequences=4,
        repetition_penalty=10.0,
        diversity_penalty=3.0,
        no_repeat_ngram_size=2,
        max_length=64,
    )
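    # Decode every returned beam into plain text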
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)


def gradio_generate_title(text):
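    """Gradio callback: return the candidate titles as one blank-line-separated string."""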
    titles = generate_title(text)
    return "\n\n".join(titles)


iface = gr.Interface(
    fn=gradio_generate_title,
    inputs=gr.Textbox(lines=5, label="Input Text"),
    outputs=gr.Textbox(lines=10, label="Generated Titles"),
    title="Title Generator",
    description="Generate multiple paraphrased titles from input text using the NoaiGPT/777 model.",
)


iface.launch()