Spaces:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig
import gradio as gr

# Load the fine-tuned summarization model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained('AyushSoni14/text-summarizer-model')
model = AutoModelForSeq2SeqLM.from_pretrained('AyushSoni14/text-summarizer-model')
tokenizer.model_max_length = 1024

# Generation settings: beam search with length constraints on the summary
gen_config = GenerationConfig(
    max_length=150,
    min_length=40,
    length_penalty=2.0,
    num_beams=4,
    early_stopping=True
)

# Summarization function: tokenize the input, generate, and decode the summary
def summarize(blog_post):
    inputs = tokenizer(blog_post, max_length=1024, truncation=True, return_tensors='pt')
    summary_ids = model.generate(inputs['input_ids'], generation_config=gen_config)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary

# Gradio UI
iface = gr.Interface(
    fn=summarize,
    inputs=gr.Textbox(lines=15, label="Enter Text to Summarize"),
    outputs=gr.Textbox(label="Summary"),
    title="Text Summarizer",
    description="Enter a long paragraph or blog post to get a summarized version."
)

iface.launch(share=True)
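
Once the Space is live, the same summarizer can also be called programmatically rather than through the web UI. Below is a minimal sketch using the gradio_client package; the Space id AyushSoni14/text-summarizer is an assumption here and should be replaced with the actual username/space-name after deployment.

from gradio_client import Client

# Assumed Space id; replace with the real one once the Space is published
client = Client("AyushSoni14/text-summarizer")

# A single-function gr.Interface exposes its endpoint as "/predict" by default
summary = client.predict("Paste a long blog post here...", api_name="/predict")
print(summary)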