Vikrant26 committed on
Commit
1d0ffee
·
verified ·
1 Parent(s): 2706dbb

Upload 3 files

Files changed (3)
  1. .env +1 -0
  2. app.py +185 -0
  3. requirements.txt +9 -0
.env ADDED
@@ -0,0 +1 @@
+ GOOGLE_API_KEY="AIzaSyDnI8-gASsDS0_94frGkc-A3eQVgTvIHDk"
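For reference, this variable is consumed at startup via python-dotenv. A minimal sketch of the loading path, mirroring the startup code in app.py below:

# Sketch of how the .env key is picked up (mirrors app.py's startup code).
import os
from dotenv import load_dotenv
import google.generativeai as genai

load_dotenv()  # reads GOOGLE_API_KEY from .env in the working directory
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY is not set")
genai.configure(api_key=api_key)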
app.py ADDED
@@ -0,0 +1,185 @@
+ import streamlit as st
+ from PyPDF2 import PdfReader
+ import docx2txt
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
+ from langchain_community.vectorstores import FAISS
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.prompts import PromptTemplate
+ from dotenv import load_dotenv
+ import os
+ import google.generativeai as genai
+ import logging
+ import json
+ import base64
+ from datetime import datetime
+ import sqlite3
+
+ load_dotenv()
+
+ # Configure logging
+ logging.basicConfig(level=logging.DEBUG)
+
+ # Configure Generative AI API key
+ api_key = os.getenv("GOOGLE_API_KEY")
+ if not api_key:
+     logging.error("Google API key not found. Make sure .env file is set up correctly.")
+ genai.configure(api_key=api_key)
+
+ # Initialize a global list to store query history
+ query_history = []
+
+ # Connect to the SQLite database
+ conn = sqlite3.connect('documents.db')
+ c = conn.cursor()
+
+ # Create the documents table if it doesn't exist
+ c.execute('''CREATE TABLE IF NOT EXISTS documents
+              (id INTEGER PRIMARY KEY, document_type TEXT, document_content TEXT)''')
+
+ # Create the query_history table if it doesn't exist
+ c.execute('''CREATE TABLE IF NOT EXISTS query_history
+              (id INTEGER PRIMARY KEY, user_id TEXT, query TEXT, response TEXT, timestamp TEXT)''')
+
+ conn.commit()
+
+ def get_document_text(document, document_type):
+     """Extract text from different document types."""
+     if document_type == 'pdf':
+         pdf_reader = PdfReader(document)
+         text = ""
+         for page in pdf_reader.pages:
+             text += page.extract_text() or ""  # extract_text() can return None on empty pages
+         return text
+     elif document_type == 'docx':
+         return docx2txt.process(document)
+     elif document_type == 'txt':
+         return document.read().decode("utf-8", errors="ignore")  # uploaded files arrive as bytes
+     else:
+         return ""
+
+ def get_text_chunks(text):
+     """Split text into manageable chunks."""
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ def get_vector_store(text_chunks):
+     """Generate embeddings and create FAISS index."""
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+     vector_store.save_local("faiss_index")
+     logging.info("FAISS index successfully created and saved.")
+
+ def get_conversational_chain():
+     """Load conversational chain for question answering."""
+     prompt_template = """
+     Answer the question as detailed as possible from the provided context,
+     make sure to provide all the details, if the answer is not in
+     provided context just say, "answer is not available in the context",
+     don't provide the wrong answer\n\n
+     Context:\n {context}?\n
+     Question: \n{question}\n
+
+     Answer:
+     """
+
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+     return chain
+
+ def user_input(user_question, user_id):
+     """Process user input and generate response."""
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+
+     # Check if the FAISS index file exists before attempting to load it
+     if not os.path.exists("faiss_index/index.faiss"):
+         logging.error("FAISS index file not found. Ensure that the index is created and saved properly.")
+         return "Error: FAISS index file not found."
+
+     # Load FAISS index with the necessary flag
+     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+     docs = new_db.similarity_search(user_question)
+
+     # Load conversational chain
+     chain = get_conversational_chain()
+
+     # Generate response
+     response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
+     response_text = response["output_text"]
+
+     # Store query and response in the history
+     current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+     query_history.append((user_id, user_question, response_text, current_time))
+
+     # Store query and response in the database
+     c.execute("INSERT INTO query_history (user_id, query, response, timestamp) VALUES (?, ?, ?, ?)",
+               (user_id, user_question, response_text, current_time))
+     conn.commit()
+
+     return response_text
+
+ def display_query_history(user_id):
+     """Display the history of queries and responses for a specific user."""
+     st.sidebar.subheader("Query History")
+     c.execute("SELECT query, response, timestamp FROM query_history WHERE user_id = ?", (user_id,))
+     history = c.fetchall()
+     for query, response, timestamp in history:
+         st.sidebar.write(f"**Query:** {query}")
+         st.sidebar.write(f"**Response:** {response}")
+         st.sidebar.write(f"**Timestamp:** {timestamp}")
+         st.sidebar.write("---")
+
+ def download_query_history(user_id):
+     """Allow users to download their query history as a JSON file."""
+     c.execute("SELECT query, response, timestamp FROM query_history WHERE user_id = ?", (user_id,))
+     history = c.fetchall()
+     history_json = json.dumps([{"query": query, "response": response, "timestamp": timestamp} for query, response, timestamp in history], indent=4)
+     b64 = base64.b64encode(history_json.encode()).decode()  # Encode the history as base64
+     href = f'<a href="data:file/json;base64,{b64}" download="query_history.json">Download Query History</a>'
+     st.sidebar.markdown(href, unsafe_allow_html=True)
+
+ def main():
+     """Main Streamlit application function."""
+     st.set_page_config("Chat with Documents")
+     st.header("📄📄 Chat with Documents 📄📄")
+
+     user_id = st.text_input("Enter your user ID:")
+
+     user_question = st.text_input("Ask a Question from the Documents")
+
+     if user_question and user_id:
+         response = user_input(user_question, user_id)
+         st.write("Reply: ", response)
+
+     with st.sidebar:
+         st.title("Menu:")
+         document_type = st.selectbox("Select Document Type", ["pdf", "docx", "txt"])
+         document = st.file_uploader(f"Upload your {document_type.upper()} Documents", accept_multiple_files=True)
+         if st.button("Submit & Process"):
+             with st.spinner("Processing..."):
+                 try:
+                     if document:
+                         all_chunks = []
+                         for doc in document:
+                             doc_text = get_document_text(doc, document_type)
+                             all_chunks.extend(get_text_chunks(doc_text))
+                             c.execute("INSERT INTO documents (document_type, document_content) VALUES (?, ?)",
+                                       (document_type, doc_text))
+                         get_vector_store(all_chunks)  # index all uploads together so earlier files are not overwritten
+                         conn.commit()
+                         st.success("Documents processed and stored in the database.")
+                     else:
+                         st.error("Please upload documents before processing.")
+                 except Exception as e:
+                     logging.error("Error processing documents: %s", e)
+                     st.error(f"An error occurred: {e}")
+
+     # Display the query history in the sidebar
+     display_query_history(user_id)
+
+     # Add download button for query history
+     download_query_history(user_id)
+
+ if __name__ == "__main__":
+     main()
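app.py persists every query/response pair to the query_history table in documents.db, so the history can also be read back outside the Streamlit UI. A minimal sketch, assuming the app has already written rows and documents.db is in the working directory (the user ID below is hypothetical):

# Sketch: read back a user's saved queries from the SQLite store used by app.py.
import sqlite3

conn = sqlite3.connect("documents.db")
rows = conn.execute(
    "SELECT query, response, timestamp FROM query_history WHERE user_id = ?",
    ("demo-user",),  # hypothetical user ID for illustration
).fetchall()
for query, response, timestamp in rows:
    print(f"[{timestamp}] {query!r} -> {response!r}")
conn.close()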
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit==1.22.0
+ google-generativeai==0.7.2
+ python-dotenv==1.0.0
+ langchain==0.2.6
+ PyPDF2==3.0.1
+ chromadb==0.5.3
+ faiss-cpu==1.7.2
+ langchain_google_genai==1.0.7
+ langchain_community==0.2.6
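A quick way to confirm the pinned embedding and vector-store packages work together is a standalone round-trip using the same calls app.py relies on. A minimal sketch, assuming GOOGLE_API_KEY is available via .env or the environment; the sample sentences are illustrative only:

# Sketch: verify the embeddings + FAISS round-trip with the pinned packages.
from dotenv import load_dotenv
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS

load_dotenv()  # expects GOOGLE_API_KEY in .env or the environment
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
store = FAISS.from_texts(
    ["FAISS stores dense vectors.", "Streamlit renders the UI."],
    embedding=embeddings,
)
docs = store.similarity_search("What stores the vectors?", k=1)
print(docs[0].page_content)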