|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
import nltk |
|
nltk.download('punkt') |
|
|
|
# Cache of already-loaded (tokenizer, model) pairs, keyed by model name.
# Loading a seq2seq checkpoint is expensive (download + weight init), so
# doing it once per model instead of once per request is a large win.
_MODEL_CACHE = {}


def _load_model(model_name):
    """Return a cached ``(tokenizer, model)`` pair for *model_name*, loading it on first use."""
    if model_name not in _MODEL_CACHE:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        _MODEL_CACHE[model_name] = (tokenizer, model)
    return _MODEL_CACHE[model_name]


def generate_subject(model_name, email_body):
    """Generate a short subject line for an email body.

    Parameters
    ----------
    model_name : str
        Hugging Face model id of a seq2seq model fine-tuned for
        email-subject generation (one of the dropdown choices).
    email_body : str
        Raw text of the email to summarise.

    Returns
    -------
    str
        The first sentence of the decoded model output.
    """
    tokenizer, model = _load_model(model_name)
    # Task prefix the fine-tuned checkpoints expect — do not change the wording.
    prompt = ["provide email subject: " + email_body]
    encoded = tokenizer(prompt, max_length=512, truncation=True, return_tensors="pt")
    # Small max_length keeps the generated subject terse.
    output = model.generate(**encoded, num_beams=8, do_sample=True, min_length=1, max_length=10)
    decoded = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    # Keep only the first sentence in case the model generates more than one.
    return nltk.sent_tokenize(decoded.strip())[0]
|
|
|
|
|
# --- UI wiring -------------------------------------------------------------
# Pick one of the fine-tuned subject-generation checkpoints, paste an email
# body, and read back the predicted subject line.
MODEL_CHOICES = [
    "anukvma/t5-base-medium-email-subject-generation-v2",
    "anukvma/bart-base-medium-email-subject-generation-v5",
]

model_selector = gr.Dropdown(choices=MODEL_CHOICES, label="Select Model")
email_body_box = gr.Textbox(lines=5, label="Email Body")
subject_box = gr.Textbox(label="Model Summary")

iface = gr.Interface(
    fn=generate_subject,
    inputs=[model_selector, email_body_box],
    outputs=subject_box,
)

iface.launch()