import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch


def generate_summary(model, tokenizer, dialogue):
    # Tokenize the input dialogue
    inputs = tokenizer(dialogue, return_tensors="pt", max_length=1024, truncation=True)
    # Generate the summary without tracking gradients
    with torch.no_grad():
        summary_ids = model.generate(inputs["input_ids"], max_length=150, length_penalty=0.8, num_beams=4)
    # Decode the generated token IDs and return the summary text
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return summary
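
# Example usage (illustrative only; the sample dialogue is made up):
# generate_summary expects an already-loaded tokenizer/model pair plus a
# plain-text dialogue string, e.g.
#   tok = AutoTokenizer.from_pretrained("ale-dp/pegasus-finetuned-dialog-summarizer")
#   mdl = AutoModelForSeq2SeqLM.from_pretrained("ale-dp/pegasus-finetuned-dialog-summarizer")
#   print(generate_summary(mdl, tok, "Anna: Lunch at noon?\nBen: Sure, see you then."))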
# Set the page title and favicon
st.set_page_config(
    page_title="Dialog Summarizer App",
    page_icon=":memo:",  # You can set your own emoji or use an image URL
)

# Add a logo at the top middle of the app
logo_path = "ale.png"
logo_html = f'<div style="text-align:center;"><img src="{logo_path}" width="200"></div>'
st.markdown(logo_html, unsafe_allow_html=True)
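# Note: Streamlit may not serve a bare relative path referenced from raw HTML,
# so the logo can fail to render; st.image(logo_path, width=200) is a simpler
# (if left-aligned) alternative if that happens.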
# Display the app name below the logo
st.title("Dialog Summarizer App")
st.info("🖥️ Note: This application is running on CPU. Please be patient ⏳.")
st.markdown(
    "This app summarizes dialogues. Enter a dialogue in the text area and the "
    "generated summary will appear on the right. Keep dialogues to a few "
    "sentences for optimal results."
)
# Create a two-column layout using st.columns
col1, col2 = st.columns(2)

# User input on the left side with increased height; the key lets the
# "Clear" button reset the widget through session state
user_input = col1.text_area("Enter the dialog:", height=300, key="dialog_input")

# Callback that clears the text area
def clear_input():
    st.session_state["dialog_input"] = ""

# Add "Summarize" and "Clear" buttons
summarize_button = col1.button("Summarize")
col1.button("Clear", on_click=clear_input)

# If "Summarize" is clicked and there is user input, generate and display the summary
if summarize_button and user_input:
    # Load the fine-tuned Pegasus model and tokenizer
    model_name = "ale-dp/pegasus-finetuned-dialog-summarizer"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    # Generate the summary
    summary = generate_summary(model, tokenizer, user_input)

    # Display the generated summary on the right side
    col2.subheader("Generated Summary:")
    col2.write(summary)
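
# Optional refinement (a sketch, not used above): the "Summarize" branch
# re-loads the Pegasus model on every click. Streamlit's st.cache_resource
# decorator can cache the tokenizer and model once per process so reruns
# reuse them; this helper could then replace the two from_pretrained calls.
@st.cache_resource
def load_summarizer(name="ale-dp/pegasus-finetuned-dialog-summarizer"):
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSeq2SeqLM.from_pretrained(name)
    return tokenizer, model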