Update app.py
app.py CHANGED
@@ -2,136 +2,237 @@ import streamlit as st
 import logging
 import os
 from io import BytesIO
-…
-from langchain.text_splitter import …
+import pdfplumber
+from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from sentence_transformers import SentenceTransformer
 from transformers import pipeline
+import re

 # Setup logging for Spaces
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)

-# Lazy load models
-@st.cache_resource(ttl=…
+# Lazy load models with caching
+@st.cache_resource(ttl=1800)
 def load_embeddings_model():
     logger.info("Loading embeddings model")
     try:
-        return SentenceTransformer("all-MiniLM-…
+        return SentenceTransformer("all-MiniLM-L12-v2")
     except Exception as e:
         logger.error(f"Embeddings load error: {str(e)}")
         st.error(f"Embedding model error: {str(e)}")
         return None

-@st.cache_resource(ttl=…
+@st.cache_resource(ttl=1800)
 def load_qa_pipeline():
     logger.info("Loading QA pipeline")
     try:
-        return pipeline("text2text-generation", model="google/flan-t5-small", max_length=…
+        return pipeline("text2text-generation", model="google/flan-t5-small", max_length=300)
     except Exception as e:
         logger.error(f"QA model load error: {str(e)}")
         st.error(f"QA model error: {str(e)}")
         return None

-…
+@st.cache_resource(ttl=1800)
+def load_summary_pipeline():
+    logger.info("Loading summary pipeline")
+    try:
+        return pipeline("summarization", model="sshleifer/distilbart-cnn-6-6", max_length=150)
+    except Exception as e:
+        logger.error(f"Summary model load error: {str(e)}")
+        st.error(f"Summary model error: {str(e)}")
+        return None
+
+# Process PDF with improved extraction
 def process_pdf(uploaded_file):
-    logger.info("Processing PDF")
+    logger.info("Processing PDF with enhanced extraction")
     try:
-…
+        text = ""
+        code_blocks = []
+        with pdfplumber.open(BytesIO(uploaded_file.getvalue())) as pdf:
+            for page in pdf.pages[:20]:
+                extracted = page.extract_text(layout=False)
+                if extracted:
+                    text += extracted + "\n"
+                for char in page.chars:
+                    if 'fontname' in char and 'mono' in char['fontname'].lower():
+                        code_blocks.append(char['text'])
+                code_text_page = page.extract_text()
+                code_matches = re.finditer(r'(^\s{2,}.*?(?:\n\s{2,}.*?)*)', code_text_page or "", re.MULTILINE)
+                for match in code_matches:
+                    code_blocks.append(match.group().strip())
+                tables = page.extract_tables()
+                if tables:
+                    for table in tables:
+                        text += "\n".join([" | ".join(map(str, row)) for row in table if row]) + "\n"
+                for obj in page.extract_words():
+                    if obj.get('size', 0) > 12:
+                        text += f"\n{obj['text']}\n"
+
+        code_text = "\n".join(code_blocks).strip()
         if not text:
-…
-            chunks = text_splitter.split_text(text)
-…
+            raise ValueError("No text extracted from PDF")
+
+        # Use RecursiveCharacterTextSplitter for better semantic splitting
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=500, chunk_overlap=100, separators=["\n\n", "\n", ".", " "]
+        )
+        text_chunks = text_splitter.split_text(text)[:50]
+        code_chunks = text_splitter.split_text(code_text)[:25] if code_text else []
+
         embeddings_model = load_embeddings_model()
         if not embeddings_model:
-            return None
-…
+            return None, None, text, code_text
+
+        # Build FAISS vector stores efficiently
+        text_vectors = [embeddings_model.encode(chunk) for chunk in text_chunks]
+        code_vectors = [embeddings_model.encode(chunk) for chunk in code_chunks]
+
+        text_vector_store = FAISS.from_embeddings(zip(text_chunks, text_vectors), embeddings_model.encode) if text_chunks else None
+        code_vector_store = FAISS.from_embeddings(zip(code_chunks, code_vectors), embeddings_model.encode) if code_chunks else None
+
+        logger.info("PDF processed successfully with enhanced extraction")
+        return text_vector_store, code_vector_store, text, code_text
     except Exception as e:
         logger.error(f"PDF processing error: {str(e)}")
         st.error(f"PDF error: {str(e)}")
-        return None
+        return None, None, "", ""
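
A note on the store construction above: recent langchain_community releases expect the second argument of FAISS.from_embeddings to be an Embeddings object exposing embed_query, so passing the raw embeddings_model.encode callable can fail at query time depending on the installed version. A minimal alternative sketch, assuming the HuggingFaceEmbeddings wrapper is available from langchain_community.embeddings (newer releases move it to the langchain-huggingface package):

# Sketch, not the committed code: build the stores with a LangChain Embeddings
# wrapper instead of a bare SentenceTransformer.encode callable. from_texts
# embeds the chunks itself, so the manual encode loops become unnecessary.
from langchain_community.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L12-v2")
text_vector_store = FAISS.from_texts(text_chunks, embeddings) if text_chunks else None
code_vector_store = FAISS.from_texts(code_chunks, embeddings) if code_chunks else None
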
+
+# Summarize PDF
+def summarize_pdf(text):
+    logger.info("Generating summary")
+    try:
+        summary_pipeline = load_summary_pipeline()
+        if not summary_pipeline:
+            return "Summary model unavailable."

-…
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=500, chunk_overlap=50, separators=["\n\n", "\n", ".", " "]
+        )
+        chunks = text_splitter.split_text(text)[:2]
+        summaries = []
+
+        for chunk in chunks:
+            summary = summary_pipeline(chunk[:500], max_length=100, min_length=30, do_sample=False)[0]['summary_text']
+            summaries.append(summary.strip())
+
+        combined_summary = " ".join(summaries)
+        if len(combined_summary.split()) > 150:
+            combined_summary = " ".join(combined_summary.split()[:150])
+        logger.info("Summary generated")
+        return f"Sure, here's a concise summary of the PDF:\n{combined_summary}"
+    except Exception as e:
+        logger.error(f"Summary error: {str(e)}")
+        return f"Oops, something went wrong summarizing: {str(e)}"
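
One detail in the loop above: chunk[:500] truncates by characters while the model's limit is in tokens, and with chunk_size=500 the slice is mostly redundant. If an overlong chunk slips through, the pipeline's own tokenizer can trim it; a sketch assuming the standard truncation kwarg of transformers pipelines:

# Sketch: let the tokenizer trim overlong input at the token level instead of
# slicing characters; truncation=True caps input at the model's max length.
summary = summary_pipeline(chunk, max_length=100, min_length=30,
                           do_sample=False, truncation=True)[0]['summary_text']
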
+
+# Answer question with improved response
+def answer_question(text_vector_store, code_vector_store, query):
     logger.info(f"Processing query: {query}")
     try:
-        if not …
-            return "Please upload a PDF first…
-…
+        if not text_vector_store and not code_vector_store:
+            return "Please upload a PDF first!"
+
         qa_pipeline = load_qa_pipeline()
         if not qa_pipeline:
-            return "QA model unavailable."
-…
+            return "Sorry, the QA model is unavailable right now."
+
+        is_code_query = any(keyword in query.lower() for keyword in ["code", "script", "function", "programming", "give me code", "show code"])
+        if is_code_query and code_vector_store:
+            return f"Here's the code from the PDF:\n```python\n{st.session_state.code_text}\n```"
+
+        vector_store = text_vector_store
+        if not vector_store:
+            return "No relevant content found for your query."
+
+        docs = vector_store.similarity_search(query, k=5)
         context = "\n".join(doc.page_content for doc in docs)
-        prompt = f"Context: {context}\nQuestion: {query}\…
+        prompt = f"Context: {context}\nQuestion: {query}\nProvide a detailed, accurate answer based on the context, prioritizing relevant information. Respond as a helpful assistant:"
         response = qa_pipeline(prompt)[0]['generated_text']
         logger.info("Answer generated")
-        return response.strip()
+        return f"Got it! Here's a detailed answer:\n{response.strip()}"
     except Exception as e:
         logger.error(f"Query error: {str(e)}")
-        return f"…
+        return f"Sorry, something went wrong: {str(e)}"
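
similarity_search always returns k results whether or not they are relevant, and five 500-character chunks can overflow flan-t5-small's roughly 512-token input window. A hedged refinement using the FAISS store's similarity_search_with_score (L2 distance, lower is closer); the 1.0 cutoff and the three-chunk cap are illustrative values to tune, not constants from this commit:

# Sketch: drop weak matches before building the prompt. The 1.0 distance
# threshold and the three-chunk cap are assumptions, not fixed values.
results = vector_store.similarity_search_with_score(query, k=5)
docs = [doc for doc, score in results if score < 1.0]
context = "\n".join(doc.page_content for doc in docs[:3])
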

 # Streamlit UI
 try:
-    st.set_page_config(page_title="Smart PDF Q&A", page_icon="📄")
-    st.title("Smart PDF Q&A")
+    st.set_page_config(page_title="Smart PDF Q&A", page_icon="📄", layout="wide")
     st.markdown("""
-    Upload a PDF and ask questions about its content. Chat history is preserved.
     <style>
-    .…
-    .…
-    .…
+    .main { max-width: 900px; margin: 0 auto; padding: 20px; }
+    .sidebar { background-color: #f8f9fa; padding: 10px; border-radius: 5px; }
+    .chat-container { border: 1px solid #ddd; border-radius: 10px; padding: 10px; height: 65vh; overflow-y: auto; margin-top: 20px; }
+    .user-bubble { background-color: #e6f3ff; border-radius: 15px; padding: 10px; margin: 5px; text-align: right; }
+    .assistant-bubble { background-color: #f0f0f0; border-radius: 15px; padding: 10px; margin: 5px; text-align: left; }
+    .stButton>button { background-color: #4CAF50; color: white; border: none; padding: 8px 16px; border-radius: 5px; }
+    .stButton>button:hover { background-color: #45a049; }
+    pre { background-color: #f8f8f8; padding: 10px; border-radius: 5px; overflow-x: auto; }
+    .header { background: linear-gradient(90deg, #4CAF50, #81C784); color: white; padding: 10px; border-radius: 5px; text-align: center; }
+    .stChatInput { position: fixed; bottom: 10px; width: 80%; }
     </style>
     """, unsafe_allow_html=True)

+    st.markdown('<div class="header"><h1>Smart PDF Q&A</h1></div>', unsafe_allow_html=True)
+    st.markdown("Upload a PDF to ask questions, summarize (~150 words), or extract code with 'give me code'. Fast and friendly responses!")
+
     # Initialize session state
     if "messages" not in st.session_state:
         st.session_state.messages = []
-    if "…
-        st.session_state.…
+    if "text_vector_store" not in st.session_state:
+        st.session_state.text_vector_store = None
+    if "code_vector_store" not in st.session_state:
+        st.session_state.code_vector_store = None
+    if "pdf_text" not in st.session_state:
+        st.session_state.pdf_text = ""
+    if "code_text" not in st.session_state:
+        st.session_state.code_text = ""
+
+    # Sidebar
+    with st.sidebar:
+        st.markdown('<div class="sidebar">', unsafe_allow_html=True)
+        theme = st.radio("Theme", ["Light", "Dark"], index=0)
+        st.markdown('</div>', unsafe_allow_html=True)

-    # PDF upload
+    # PDF upload and processing
     uploaded_file = st.file_uploader("Upload a PDF", type=["pdf"])
-…
-    st.…
-    st.session_state.…
-…
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        if st.button("Process PDF") and uploaded_file:
+            with st.spinner("Processing PDF..."):
+                st.session_state.text_vector_store, st.session_state.code_vector_store, st.session_state.pdf_text, st.session_state.code_text = process_pdf(uploaded_file)
+                if st.session_state.text_vector_store or st.session_state.code_vector_store:
+                    st.success("PDF processed! Ask away or summarize.")
+                    st.session_state.messages = []
+                else:
+                    st.error("Failed to process PDF.")
+    with col2:
+        if st.button("Summarize PDF") and st.session_state.pdf_text:
+            with st.spinner("Summarizing..."):
+                summary = summarize_pdf(st.session_state.pdf_text)
+                st.session_state.messages.append({"role": "assistant", "content": summary})
+                st.markdown(summary, unsafe_allow_html=True)

     # Chat interface
-…
+    st.markdown('<div class="chat-container">', unsafe_allow_html=True)
+    if st.session_state.text_vector_store or st.session_state.code_vector_store:
+        prompt = st.chat_input("Ask a question (e.g., 'Give me code' or 'What’s the main idea?'):")
         if prompt:
             st.session_state.messages.append({"role": "user", "content": prompt})
             with st.chat_message("user"):
-                st.markdown(prompt)
+                st.markdown(f"<div class='user-bubble'>{prompt}</div>", unsafe_allow_html=True)
             with st.chat_message("assistant"):
-                with st.spinner(…
-                    answer = answer_question(st.session_state.…
-                st.markdown(answer)
+                with st.spinner('<div class="spinner">⏳</div>'):
+                    answer = answer_question(st.session_state.text_vector_store, st.session_state.code_vector_store, prompt)
+                    st.markdown(f"<div class='assistant-bubble'>{answer}</div>", unsafe_allow_html=True)
             st.session_state.messages.append({"role": "assistant", "content": answer})

     # Display chat history
     for message in st.session_state.messages:
-…
+        css_class = "user-bubble" if message["role"] == "user" else "assistant-bubble"
+        st.markdown(f"<div class='{css_class}'>{message['content']}</div>", unsafe_allow_html=True)
+
+    st.markdown('</div>', unsafe_allow_html=True)

     # Download chat history
     if st.session_state.messages:
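
The body of the download block (new lines 239–240) falls between the two hunks, so the diff does not show it. Purely as a hypothetical illustration of what such a block typically looks like in Streamlit, not the committed code:

# Hypothetical sketch only — the actual lines 239–240 are not shown in the diff.
chat_log = "\n".join(f"{m['role']}: {m['content']}" for m in st.session_state.messages)
st.download_button("Download chat history", chat_log, file_name="chat_history.txt")
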
@@ -140,4 +241,4 @@ try:

 except Exception as e:
     logger.error(f"App initialization failed: {str(e)}")
-    st.error(f"App failed to start: {str(e)}. Check Spaces logs or contact support.")
+    st.error(f"App failed to start: {str(e)}. Check Spaces logs or contact support.")
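
For reference, the imports in this version imply roughly the following dependency set. This is an assumption reconstructed from the code, not the Space's actual requirements.txt; faiss-cpu backs the LangChain FAISS store and torch is pulled in by the model libraries:

# requirements.txt (assumed from the imports, not taken from the repo)
streamlit
pdfplumber
langchain
langchain-community
sentence-transformers
transformers
faiss-cpu
torch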