# app.py
# Requires: pip install streamlit transformers torch gdown
import zipfile

import streamlit as st
import torch
import gdown
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer


# Download the model from Google Drive and load it; cached so it only runs once per session.
@st.cache_resource
def load_model_from_gdrive():
    # Replace with your actual Google Drive link. It should point to a zip archive of the
    # saved model; for a Drive *folder* link, use gdown.download_folder instead.
    url = 'https://drive.google.com/drive/folders/19P3ZcWor8znyaOMJgx_gaHuOyf4alnP3?usp=drive_link'
    output = 'model.zip'
    # fuzzy=True lets gdown extract the file id from a standard share link.
    gdown.download(url, output, quiet=False, fuzzy=True)

    # Unzip the model into a local 'model' directory
    with zipfile.ZipFile(output, 'r') as zip_ref:
        zip_ref.extractall('model')

    # Load the model and tokenizer from the extracted directory
    model = AutoModelForSeq2SeqLM.from_pretrained('model')
    tokenizer = AutoTokenizer.from_pretrained('model')
    return model, tokenizer


model, tokenizer = load_model_from_gdrive()
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)

# Streamlit app
st.title("Text Summarization App")
st.write("Enter the text you want to summarize:")

# Text input
user_input = st.text_area("Text to summarize", height=200)

# Summarize text
if st.button("Summarize"):
    if user_input:
        summary = summarizer(user_input, max_length=130, min_length=30, do_sample=False)
        st.subheader("Summary:")
        st.write(summary[0]['summary_text'])
    else:
        st.write("Please enter text to summarize.")
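
# Usage note: to launch the app locally (assuming the dependencies listed at the top
# of this file are installed), run the Streamlit CLI against this script:
#   streamlit run app.py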