# Streamlit app: automated email summarization (Hugging Face Spaces)
import streamlit as st
from transformers import pipeline
from llama import load_llama_model, generate_llama_summary, PROMPT_TEMPLATE
# Page configuration must be the first Streamlit call in the script.
st.set_page_config(page_title="Email Summarizer", layout="wide")
@st.cache_resource
def load_all_models():
    """Load every summarization backend once and cache it for the session.

    Returns a dict keyed by the UI model label:
    a transformers summarization pipeline for "mt5-small", whatever
    load_llama_model() returns for "Llama 3.2", and None for the
    not-yet-implemented "Llama 7b Instruct".
    """
    with st.spinner("Loading models... This may take a few minutes"):
        registry = {}
        registry["mt5-small"] = pipeline(
            "summarization",
            model="ak2603/mt5-small-synthetic-data-plus-translated",
        )
        registry["Llama 3.2"] = load_llama_model()
        # Placeholder entry; selecting it in the UI yields an error message.
        registry["Llama 7b Instruct"] = None
    return registry
# Build the model registry eagerly on startup (cached across reruns).
models = load_all_models()

# --- Page header ---
st.title("📧 Automated Email Summarization")

# --- Sidebar: model selection and load status ---
with st.sidebar:
    st.header("Configuration")
    model_choice = st.selectbox(
        "Select Model",
        ["mt5-small", "Llama 3.2", "Llama 7b Instruct"],
        index=0,
    )
    st.markdown("---")
    st.markdown("**Model Information:**")
    st.info(f"Selected model: {model_choice}")
    loaded_count = sum(1 for m in models.values() if m is not None)
    st.info(f"Total loaded models: {loaded_count}")

# Two-column layout: email input on the left (wider), actions on the right.
col1, col2 = st.columns([2, 1])
# Left column: raw email text entry.
with col1:
    st.subheader("Input Email")
    email_input = st.text_area(
        "Paste your email here:",
        key="input_text",
        height=300,
        placeholder="Enter email content here...",
    )
# Right column: trigger summarization, then display and export the result.
with col2:
    st.subheader("Summary Generation")
    if st.button("Generate Summary", use_container_width=True):
        if not email_input:
            st.error("Please enter some email content first!")
        else:
            try:
                selected_model = models[model_choice]
                if selected_model is None:
                    # Placeholder entries in the registry are stored as None.
                    st.error("Selected model is not implemented yet")
                else:
                    with st.spinner("Generating summary..."):
                        if model_choice == "mt5-small":
                            # HF pipeline returns a list of dicts; take the
                            # first candidate's summary text.
                            outputs = selected_model(
                                email_input,
                                max_length=150,
                                do_sample=True,
                                repetition_penalty=1.5,
                            )
                            result = outputs[0]["summary_text"]
                        elif model_choice == "Llama 3.2":
                            # Registry stores a (model, tokenizer) pair.
                            llama_model, llama_tokenizer = selected_model
                            result = generate_llama_summary(
                                email_input,
                                llama_model,
                                llama_tokenizer,
                                PROMPT_TEMPLATE,
                            )
                    # Show the summary and offer it as a downloadable file.
                    st.success("**Generated Summary:**")
                    st.write(result)
                    st.download_button(
                        label="Download Summary",
                        data=result,
                        file_name="email_summary.txt",
                        mime="text/plain",
                    )
            except Exception as e:
                st.error(f"Error generating summary: {str(e)}")
# --- Footer ---
# NOTE(fix): the original line ended with a stray " |" extraction artifact,
# which is a Python syntax error; it has been removed.
st.markdown("---")
st.markdown("_Automated email summarization system v1.0_")