import gradio as gr
from transformers import pipeline
import torch
# Check if CUDA is available and set device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Initialize the model with device setting
model = pipeline(
    "summarization",
    model="luisotorres/bart-finetuned-samsum",
    device=device,
)
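
# Note (assumption about the installed transformers version): passing a device
# string such as "cuda"/"cpu" works on recent releases; older ones expect an
# integer index instead, e.g. device=0 if torch.cuda.is_available() else -1.
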
def summarize_text(text):
    try:
        # Dynamically set max_length based on input length
        # (note: max_length counts generated tokens, while input_length counts
        # whitespace-separated words, so this is only a rough heuristic)
        input_length = len(text.split())
        max_length = min(130, max(30, input_length // 2))
        summary = model(
            text,
            max_length=max_length,
            min_length=30,
            do_sample=False,  # Deterministic generation
        )
        return summary[0]["summary_text"]
    except Exception as e:
        return f"Error: {str(e)}"
# Create Gradio interface
iface = gr.Interface(
    fn=summarize_text,
    inputs=gr.Textbox(
        label="Input Text",
        lines=5,
        placeholder="Enter the text you want to summarize...",
    ),
    outputs=gr.Textbox(label="Summary"),
    title="Text Summarization",
    description="Enter your text to generate a concise summary. The summary length will automatically adjust based on the length of your input.",
    examples=[
        ["Sarah: Do you think it's a good idea to invest in Bitcoin?\nEmily: I'm skeptical. The market is very volatile, and you could lose money.\nSarah: True. But there's also a high upside, right?"],
        ["John: Hey, can you help me with the project?\nMary: Sure, what do you need?\nJohn: I'm stuck on the database design.\nMary: OK, let's schedule a call tomorrow morning.\nJohn: Perfect, thanks!"],
    ],
    allow_flagging="never",
)
# Launch the interface without the share parameter (Spaces exposes the app itself)
iface.launch(
    server_name="0.0.0.0",  # Required for Spaces
    server_port=7860,       # Standard port for Spaces
    show_error=True,
)
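
# To run locally (a sketch, assuming the usual dependencies are installed):
#   pip install gradio transformers torch
#   python app.py
# Then open http://localhost:7860 in a browser.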