Update app.py
app.py CHANGED
@@ -1,127 +1,133 @@
-
- from fastapi.responses import HTMLResponse
- from fastapi.staticfiles import StaticFiles
- import os
- from dotenv import load_dotenv
- from PyPDF2 import PdfReader
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain_google_genai import GoogleGenerativeAIEmbeddings
- from langchain_community.vectorstores import FAISS
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain.chains.question_answering import load_qa_chain
- from langchain.prompts import PromptTemplate
- import logging
-
- app = FastAPI()
-
- # Set up logging
- logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')
-
- import google.generativeai as genai
  from dotenv import load_dotenv
  import os

- #
- api_key
- )
- def get_vector_store(text_chunks):
-     logging.info("Starting vector store creation")
-     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-     if not os.path.exists(faiss_index_dir):
- <p>Use GET /ask_question/ to ask questions from the PDFs you uploaded.</p>
- </body>
- </html>
- """
+ import streamlit as st
  from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
  import os
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+ import google.generativeai as genai
+ from langchain_community.vectorstores import FAISS
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.prompts import PromptTemplate
+ from htmlTemplates import css, bot_template, user_template
+ import logging
+ import faiss

+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

+ load_dotenv()
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         try:
+             pdf_reader = PdfReader(pdf)
+             for page in pdf_reader.pages:
+                 text += page.extract_text() or ""
+         except Exception as e:
+             logging.error(f"Error processing PDF file: {e}")
+     return text
+
+ def get_text_chunks(text):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=10000,
+         chunk_overlap=1000
+     )
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ def get_vector_store(text_chunks):
+     logging.info("Starting vector store creation")
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     logging.info("Embeddings created")

+     # Create the FAISS vector store
+     vector_store = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     logging.info("FAISS vector store created")
+
+     # Define the directory where the FAISS index will be saved
+     faiss_index_dir = os.path.join(os.path.dirname(__file__), "faiss_index")
+     os.makedirs(faiss_index_dir, exist_ok=True)
+
+     # Save the entire FAISS vector store, including the docstore and index_to_docstore_id
+     vector_store.save_local(faiss_index_dir)
+     logging.info("FAISS vector store saved successfully.")
+
+ def get_conversation_chain():
+     prompt_template = """
+     Answer the question clearly and precisely. If the answer is not in the provided context, return
+     "Sorry, I don't know the answer"; do not provide a wrong answer.
+     Context:\n {context}?\n
+     Question:\n{question}\n
+     Answer:
+     """
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+     prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])
+     chain = load_qa_chain(model, chain_type='stuff', prompt=prompt)
+     return chain
+
+ def user_input(user_question):
+     logging.info("Processing user input")

+     # Reload the FAISS vector store from the saved directory
+     faiss_index_dir = os.path.join(os.path.dirname(__file__), "faiss_index")

+     if not os.path.exists(faiss_index_dir):
+         st.warning("Please upload and process PDF files before asking questions.")
+         return
+
+     try:
+         # Load the entire FAISS vector store, enabling dangerous deserialization since we trust the source
+         new_db = FAISS.load_local(faiss_index_dir, GoogleGenerativeAIEmbeddings(model='models/embedding-001'), allow_dangerous_deserialization=True)
+         logging.info("FAISS vector store loaded successfully")

+         # Perform similarity search and generate response
+         docs = new_db.similarity_search(user_question)
+         chain = get_conversation_chain()
+         response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
+         st.write(user_template.replace("{{MSG}}", response["output_text"]), unsafe_allow_html=True)
+     except Exception as e:
+         logging.error(f"Error processing user input: {e}")
+         st.write(bot_template.replace("{{MSG}}", f"Sorry, there was an error processing your request: {str(e)}. Please try again later."), unsafe_allow_html=True)
+
+ def main():
+     st.set_page_config(page_title="Chat with multiple PDFs",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with multiple PDFs with Gemini Pro :books:")

+     with st.sidebar:
+         pdf_docs = st.file_uploader(
+             "Upload your PDF Files and Click on Process",
+             accept_multiple_files=True
+         )
+         if st.button("Process"):
+             with st.spinner("Processing..."):
+                 try:
+                     raw_text = get_pdf_text(pdf_docs)
+                     text_chunks = get_text_chunks(raw_text)
+                     get_vector_store(text_chunks)
+                     st.session_state.conversation = get_conversation_chain()
+                     st.success("PDFs processed successfully. You can now ask questions.")
+                 except Exception as e:
+                     logging.error(f"Error processing PDF files: {e}")
+                     st.error("There was an error processing the PDF files. Please try again later.")
+
+     user_question = st.text_input("Ask a Question from the PDF Files")
+     if user_question:
+         if not os.path.exists(os.path.join(os.path.dirname(__file__), "faiss_index", "index.faiss")):
+             st.warning("Please upload and process PDF files before asking questions.")
+         else:
+             user_input(user_question)
+
+ if __name__ == "__main__":
+     main()
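
The new app.py also imports css, bot_template, and user_template from a local htmlTemplates module that is not part of this diff. As a rough sketch of what that module is assumed to provide (a CSS string plus two HTML snippets containing a {{MSG}} placeholder that str.replace can fill), it might look like the following; the class names and styling are illustrative only:

# htmlTemplates.py -- hypothetical sketch; app.py only requires the {{MSG}} placeholder
css = """
<style>
.chat-message { padding: 1rem; border-radius: 0.5rem; margin-bottom: 1rem; color: #fff; }
.chat-message.user { background-color: #2b313e; }
.chat-message.bot { background-color: #475063; }
</style>
"""

bot_template = """
<div class="chat-message bot">{{MSG}}</div>
"""

user_template = """
<div class="chat-message user">{{MSG}}</div>
"""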
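
Because get_vector_store() persists the index to a faiss_index directory and user_input() reloads it with allow_dangerous_deserialization=True, that round trip can be sanity-checked outside Streamlit. The script below is a minimal sketch, not part of this commit; it assumes GOOGLE_API_KEY is available in the environment (for example via the same .env file) and that the app has already written faiss_index next to the script:

# check_index.py -- hypothetical standalone check of the persisted FAISS index
import os

from dotenv import load_dotenv
from langchain_community.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings

load_dotenv()

index_dir = os.path.join(os.path.dirname(__file__), "faiss_index")
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

# Reload the saved vector store the same way user_input() does in app.py.
db = FAISS.load_local(index_dir, embeddings, allow_dangerous_deserialization=True)

# Retrieval-only query: confirms the index loads and answers similarity searches.
for doc in db.similarity_search("What is this document about?", k=2):
    print(doc.page_content[:200])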