import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from examples import dialogue_examples
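# Note: `examples.py` is not shown here; it is assumed (illustrative only, not
# taken from the original source) to expose `dialogue_examples` as a plain list
# of dialogue strings, e.g.:
#   dialogue_examples = ["Anna: Lunch at 12:30?\nBen: Sure, see you there!", ...]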
def generate_summary(model, tokenizer, dialogue):
    # Tokenize the input dialogue (truncated to the model's 1024-token limit)
    inputs = tokenizer(dialogue, return_tensors="pt", max_length=1024, truncation=True)
    # Generate the summary with beam search
    with torch.no_grad():
        summary_ids = model.generate(inputs["input_ids"], max_length=150, length_penalty=0.8, num_beams=4)
    # Decode and return the summary
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return summary
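# Example usage (sketch only; assumes a model and tokenizer have been loaded as
# in the app code further below):
#   summary = generate_summary(model, tokenizer, "Anna: Lunch today?\nBen: Sure, at noon.")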
st.set_page_config(
    page_title="Dialogue Summarizer App",
    page_icon="ale.png",  # You can set your own emoji or use an image URL
)

#logo_path = "ale.png"
#logo_html = f'<div style="text-align:center;"><img src="{logo_path}" width="200"></div>'
#st.markdown(logo_html, unsafe_allow_html=True)

# Display the app name
st.title("Dialogue Summarizer App")

st.info("\n🖥️ Note: This application is running on CPU. Please be patient ⏳.")

st.markdown(
    "This app summarizes dialogues. Enter a short dialogue in the text area. "
    "For best results, enter a dialogue that is at least a few sentences long. "
    "You can also use the examples provided at the bottom of the page."
)
# Create a two-column layout using st.columns
col1, col2 = st.columns(2)

# User input on the left side with increased height
user_input = col1.text_area("Enter a Dialogue:", height=300)

# "Summarize" button below the input
summarize_button = col1.button("Summarize")

# Placeholder text shown in the summary box until a summary is generated
summary_text = "Generated Summary will appear here."

# If "Summarize" is clicked and there is user input, generate the summary
if summarize_button and user_input:
    # Load the fine-tuned Pegasus model and its tokenizer
    # (reloaded on every click here; caching with @st.cache_resource would avoid that)
    model_name = "ale-dp/pegasus-finetuned-dialog-summarizer"
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-cnn_dailymail")
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    # Generate summary
    summary_text = generate_summary(model, tokenizer, user_input)

# Summary textbox on the right side, rendered after generation so it shows the result
col2.text_area("Summary:", value=summary_text, height=300, key="summary")
st.markdown("**Dialogue examples:**") | |
for idx, example in enumerate(dialogue_examples, 1): | |
st.write(f"Example {idx}:\n{example}") |