aaporosh committed
Commit 7678f2a · verified · 1 Parent(s): f735f4c

Update app.py

Files changed (1): app.py +109 -85
app.py CHANGED
@@ -1,81 +1,102 @@
 import streamlit as st
-import os
 import logging
 from io import BytesIO
 from PyPDF2 import PdfReader
 from langchain.text_splitter import CharacterTextSplitter
-from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
-from langchain.prompts import PromptTemplate
-from langchain.chains.question_answering import load_qa_chain
-from langchain_community.llms import HuggingFaceHub
-from transformers import pipeline  # For fallback if Hub fails

-# Set up logging
-logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

-# Check API token
-if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
-    st.error("HUGGINGFACEHUB_API_TOKEN not set in secrets. Add it in Space settings.")
-    st.stop()

-try:
-    # Function to process PDF
-    def process_pdf(uploaded_file):
-        try:
-            logger.info("Starting PDF processing")
-            pdf_reader = PdfReader(BytesIO(uploaded_file.getvalue()))
-            text = ""
-            for page in pdf_reader.pages:
-                extracted = page.extract_text()
-                if extracted:
-                    text += extracted + "\n"
-
             if not text:
-                raise ValueError("No text extracted from PDF.")
-
-            # Chunk text (increased overlap for better context)
-            text_splitter = CharacterTextSplitter(separator="\n", chunk_size=800, chunk_overlap=200, length_function=len)
-            chunks = text_splitter.split_text(text)
-
-            # Embeddings (light model)
-            embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': 'cpu'})
-
-            # Vector store
-            vector_store = FAISS.from_texts(chunks, embedding=embeddings)
-            logger.info("PDF processed successfully")
-            return vector_store
-        except Exception as e:
-            logger.error(f"PDF processing error: {str(e)}")
-            st.error(f"Error processing PDF: {str(e)}")
             return None

-    # Function to answer questions
-    def answer_question(vector_store, query):
-        try:
-            logger.info(f"Answering query: {query}")
-            # Lighter LLM via pipeline for faster CPU inference
-            qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
-
-            # Retrieve top chunks
-            docs = vector_store.similarity_search(query, k=3)
-            context = "\n".join([doc.page_content for doc in docs])
-
-            # Prompt
-            prompt = f"Use this context to answer concisely: {context}\nQuestion: {query}\nAnswer:"
-            response = qa_pipeline(prompt, max_length=256, num_return_sequences=1)[0]['generated_text']
-
-            logger.info("Answer generated")
-            return response.strip()
-        except Exception as e:
-            logger.error(f"Answer generation error: {str(e)}")
-            st.error(f"Error answering: {str(e)}")
-            return "Unable to generate answer."

-    # Streamlit UI with chat history
     st.title("Smart PDF Q&A")
-    st.write("Upload a PDF and ask questions! Chat history is preserved.")

     # Initialize session state
     if "messages" not in st.session_state:
@@ -83,37 +104,40 @@ try:
     if "vector_store" not in st.session_state:
         st.session_state.vector_store = None

-    # PDF upload and process
-    uploaded_file = st.file_uploader("Upload PDF", type="pdf")
-    if uploaded_file:
-        if st.button("Process PDF"):
-            with st.spinner("Processing..."):
-                vector_store = process_pdf(uploaded_file)
-                if vector_store:
-                    st.session_state.vector_store = vector_store
-                    st.success("PDF ready! Ask away.")
-                    st.session_state.messages = []  # Reset chat on new PDF
-
-    # Display chat history
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])

-    # Question input
     if st.session_state.vector_store:
-        if prompt := st.chat_input("Ask a question:"):
-            # Add user message
             st.session_state.messages.append({"role": "user", "content": prompt})
             with st.chat_message("user"):
                 st.markdown(prompt)
-
-            # Generate answer
             with st.chat_message("assistant"):
-                with st.spinner("Thinking..."):
                     answer = answer_question(st.session_state.vector_store, prompt)
                     st.markdown(answer)
                     st.session_state.messages.append({"role": "assistant", "content": answer})

 except Exception as e:
     logger.error(f"App initialization failed: {str(e)}")
-    st.error(f"Initialization error: {str(e)}. Check logs or try factory reset.")
 
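The removed imports `PromptTemplate`, `load_qa_chain`, and `HuggingFaceHub` were never actually called in the old version (answers came from the local `transformers` pipeline), so dropping them is pure cleanup. For reference, a minimal sketch of how those imports would typically have been wired together, assuming the older pre-deprecation LangChain API and a `HUGGINGFACEHUB_API_TOKEN` in the environment; `answer_with_hub` is a hypothetical name:

from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import HuggingFaceHub

def answer_with_hub(vector_store, query):
    # Hypothetical wiring of the Hub-backed QA chain the old imports point at.
    llm = HuggingFaceHub(repo_id="google/flan-t5-base",
                         model_kwargs={"temperature": 0.5, "max_length": 256})
    prompt = PromptTemplate(
        input_variables=["context", "question"],
        template="Use this context to answer concisely: {context}\nQuestion: {question}\nAnswer:",
    )
    # "stuff" concatenates the retrieved chunks into the prompt's {context} slot.
    chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
    docs = vector_store.similarity_search(query, k=3)
    return chain.run(input_documents=docs, question=query)

The new version, with added lines marked +, follows.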
@@ -1,81 +1,102 @@
 import streamlit as st
 import logging
+import os
 from io import BytesIO
 from PyPDF2 import PdfReader
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.vectorstores import FAISS
+from sentence_transformers import SentenceTransformer
+from transformers import pipeline

+# Setup logging for Spaces
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)

+# Lazy load models
+@st.cache_resource(ttl=3600)
+def load_embeddings_model():
+    logger.info("Loading embeddings model")
+    try:
+        return SentenceTransformer("all-MiniLM-L6-v2")
+    except Exception as e:
+        logger.error(f"Embeddings load error: {str(e)}")
+        st.error(f"Embedding model error: {str(e)}")
+        return None

+@st.cache_resource(ttl=3600)
+def load_qa_pipeline():
+    logger.info("Loading QA pipeline")
+    try:
+        return pipeline("text2text-generation", model="google/flan-t5-small", max_length=200)
+    except Exception as e:
+        logger.error(f"QA model load error: {str(e)}")
+        st.error(f"QA model error: {str(e)}")
+        return None
+
+# Process PDF
+def process_pdf(uploaded_file):
+    logger.info("Processing PDF")
+    try:
+        pdf_reader = PdfReader(BytesIO(uploaded_file.getvalue()))
+        text = "".join(page.extract_text() or "" for page in pdf_reader.pages)
+        if not text:
+            # Optional OCR (uncomment if needed, requires pdf2image, pytesseract)
+            # from pdf2image import convert_from_bytes
+            # import pytesseract
+            # images = convert_from_bytes(uploaded_file.getvalue())
+            # text = "".join(pytesseract.image_to_string(img) for img in images)
             if not text:
+                raise ValueError("No text extracted from PDF")
+
+        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=600, chunk_overlap=150)
+        chunks = text_splitter.split_text(text)
+
+        embeddings_model = load_embeddings_model()
+        if not embeddings_model:
             return None
+
+        embeddings = [embeddings_model.encode(chunk) for chunk in chunks]
+        vector_store = FAISS.from_embeddings(zip(chunks, embeddings), embeddings_model.encode)
+        logger.info("PDF processed successfully")
+        return vector_store
+    except Exception as e:
+        logger.error(f"PDF processing error: {str(e)}")
+        st.error(f"PDF error: {str(e)}")
+        return None

+# Answer question
+def answer_question(vector_store, query):
+    logger.info(f"Processing query: {query}")
+    try:
+        if not vector_store:
+            return "Please upload a PDF first."
+
+        qa_pipeline = load_qa_pipeline()
+        if not qa_pipeline:
+            return "QA model unavailable."
+
+        docs = vector_store.similarity_search(query, k=3)
+        context = "\n".join(doc.page_content for doc in docs)
+        prompt = f"Context: {context}\nQuestion: {query}\nAnswer concisely:"
+        response = qa_pipeline(prompt)[0]['generated_text']
+        logger.info("Answer generated")
+        return response.strip()
+    except Exception as e:
+        logger.error(f"Query error: {str(e)}")
+        return f"Error answering: {str(e)}"

+# Streamlit UI
+try:
+    st.set_page_config(page_title="Smart PDF Q&A", page_icon="📄")
     st.title("Smart PDF Q&A")
+    st.markdown("""
+    Upload a PDF and ask questions about its content. Chat history is preserved.
+    <style>
+    .stChatMessage { border-radius: 10px; padding: 10px; margin: 5px; }
+    .stChatMessage.user { background-color: #e6f3ff; }
+    .stChatMessage.assistant { background-color: #f0f0f0; }
+    </style>
+    """, unsafe_allow_html=True)

     # Initialize session state
     if "messages" not in st.session_state:
@@ -83,37 +104,40 @@ try:
     if "vector_store" not in st.session_state:
         st.session_state.vector_store = None

+    # PDF upload
+    uploaded_file = st.file_uploader("Upload a PDF", type=["pdf"])
+    if uploaded_file and st.button("Process PDF"):
+        with st.spinner("Processing PDF..."):
+            st.session_state.vector_store = process_pdf(uploaded_file)
+            if st.session_state.vector_store:
+                st.success("PDF processed! Ask questions below.")
+                st.session_state.messages = []
+            else:
+                st.error("Failed to process PDF.")

+    # Chat interface
     if st.session_state.vector_store:
+        prompt = st.chat_input("Ask a question about the PDF:")
+        if prompt:
             st.session_state.messages.append({"role": "user", "content": prompt})
             with st.chat_message("user"):
                 st.markdown(prompt)
             with st.chat_message("assistant"):
+                with st.spinner("Generating answer..."):
                     answer = answer_question(st.session_state.vector_store, prompt)
                     st.markdown(answer)
                     st.session_state.messages.append({"role": "assistant", "content": answer})

+    # Display chat history
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Download chat history
+    if st.session_state.messages:
+        chat_text = "\n".join(f"{m['role'].capitalize()}: {m['content']}" for m in st.session_state.messages)
+        st.download_button("Download Chat History", chat_text, "chat_history.txt")
+
 except Exception as e:
     logger.error(f"App initialization failed: {str(e)}")
+    st.error(f"App failed to start: {str(e)}. Check Spaces logs or contact support.")
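The core retrieval change in this commit swaps LangChain's `HuggingFaceEmbeddings` wrapper for a raw `sentence_transformers` model plus `FAISS.from_embeddings`, which takes precomputed (text, vector) pairs up front and an embedding function for later queries. A minimal standalone sketch of that pattern, assuming `langchain_community` and `sentence-transformers` are installed; the sample chunks and query are illustrative, and passing the bare `model.encode` callable mirrors the commit (recent LangChain releases prefer an `Embeddings` object and may emit a deprecation warning):

from sentence_transformers import SentenceTransformer
from langchain_community.vectorstores import FAISS

model = SentenceTransformer("all-MiniLM-L6-v2")
chunks = ["FAISS handles the similarity search.", "Streamlit renders the chat UI."]

# One vector per chunk, computed outside FAISS.
vectors = [model.encode(chunk).tolist() for chunk in chunks]
store = FAISS.from_embeddings(list(zip(chunks, vectors)), model.encode)

# Queries go through the same encode function, so distances stay comparable.
docs = store.similarity_search("What draws the interface?", k=1)
print(docs[0].page_content)  # likely: "Streamlit renders the chat UI."

Precomputing vectors this way keeps model loading behind `st.cache_resource` and the LangChain wrapper out of the hot path, which is the practical concern on the CPU-only Spaces hardware this app runs on.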