import streamlit as st
from transformers import BartTokenizer, BartForConditionalGeneration, pipeline
import nltk
import os

# Download NLTK sentence-tokenizer data
nltk.download('punkt')
from nltk.tokenize import sent_tokenize

# Directory containing the extracted BART model files
model_path = './bart_model/bart_model'

# Verify that the directory exists and contains the necessary files
if not os.path.exists(model_path):
    st.error(f"Model directory {model_path} does not exist or is incorrect.")
    # Print out the contents of model_path for further debugging
    # print("Contents of model_path:", os.listdir(model_path))
else:
    # Load the tokenizer and model from the extracted directory
    tokenizer = BartTokenizer.from_pretrained(model_path)
    model = BartForConditionalGeneration.from_pretrained(model_path)

    # Create a summarization pipeline
    summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)

    # Set the title for the Streamlit app
    st.title("BART Summary Generator")

    # Text input for the user
    text = st.text_area("Enter your text: ")

    def generate_summary(input_text):
        # Perform summarization
        summary = summarizer(input_text, max_length=200, min_length=40, do_sample=False)
        return summary[0]['summary_text']

    if st.button("Generate"):
        if text:
            generated_summary = generate_summary(text)

            # Display the generated summary
            st.subheader("Generated Summary")
            st.write(generated_summary)
        else:
            st.warning("Please enter some text to generate a summary.")
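
# --- Optional sketch (not part of the original app) -------------------------
# The script imports sent_tokenize and downloads the 'punkt' data but never
# uses them. One plausible use, assumed here, is splitting inputs longer than
# BART's ~1024-token context into sentence chunks and summarizing each chunk
# separately. The function name summarize_long_text and the 450-word chunk
# budget are illustrative assumptions; the function relies on the `summarizer`
# pipeline defined in the else-branch above, so it only works when the model
# loaded successfully.
def summarize_long_text(input_text, max_chunk_words=450):
    sentences = sent_tokenize(input_text)
    chunks, current, current_len = [], [], 0
    for sentence in sentences:
        n_words = len(sentence.split())
        # Close the current chunk once adding this sentence would exceed the budget
        if current and current_len + n_words > max_chunk_words:
            chunks.append(" ".join(current))
            current, current_len = [], 0
        current.append(sentence)
        current_len += n_words
    if current:
        chunks.append(" ".join(current))
    # Summarize each chunk independently and join the partial summaries
    partial_summaries = [
        summarizer(chunk, max_length=200, min_length=40, do_sample=False)[0]['summary_text']
        for chunk in chunks
    ]
    return " ".join(partial_summaries)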