Shreyas094 committed
Commit 8d77966 · verified · 1 Parent(s): 75552b8

Update app.py

Files changed (1)
  1. app.py +153 -203
app.py CHANGED
@@ -1,26 +1,13 @@
- import os
- import json
  import gradio as gr
  from duckduckgo_search import DDGS
- from typing import List
- from pydantic import BaseModel, Field
- from tempfile import NamedTemporaryFile
- from langchain_community.vectorstores import FAISS
- from langchain_core.documents import Document
- from langchain_community.document_loaders import PyPDFLoader
- from langchain_community.embeddings import HuggingFaceEmbeddings
- from llama_parse import LlamaParse
+ from typing import List, Dict
+ import os
  import logging
- import shutil
-
- # Initialize LlamaParse
- llama_parser = LlamaParse(
-     api_key=os.environ.get("LLAMA_CLOUD_API_KEY"),
-     result_type="markdown",
-     num_workers=4,
-     verbose=True,
-     language="en",
- )
+
+ logging.basicConfig(level=logging.INFO)
+
+ # Environment variables and configurations
+ huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")

  def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
      """Loads and splits the document into pages."""
@@ -42,6 +29,7 @@ def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[
  def get_embeddings():
      return HuggingFaceEmbeddings(model_name="avsolatorio/GIST-Embedding-v0")

+ # Add this at the beginning of your script, after imports
  DOCUMENTS_FILE = "uploaded_documents.json"

  def load_documents():
@@ -54,6 +42,7 @@ def save_documents(documents):
      with open(DOCUMENTS_FILE, "w") as f:
          json.dump(documents, f)

+ # Replace the global uploaded_documents with this
  uploaded_documents = load_documents()

  # Modify the update_vectors function
@@ -152,180 +141,180 @@ def delete_documents(selected_docs):

      return f"Deleted documents: {', '.join(deleted_docs)}", display_documents()

- def refresh_documents():
-     global uploaded_documents
-     uploaded_documents = load_documents()
-     return display_documents()
-
- def display_documents():
-     return gr.CheckboxGroup(
-         choices=[doc["name"] for doc in uploaded_documents],
-         value=[doc["name"] for doc in uploaded_documents if doc["selected"]],
-         label="Select documents to query or delete"
-     )
-
- def initial_conversation():
-     return [
-         (None, "Welcome! I'm your AI assistant for web search and PDF analysis. Here's how you can use me:\n\n"
-         "1. Set the toggle for Web Search and PDF Search from the checkbox in Additional Inputs drop down window\n"
-         "2. Use web search to find information\n"
-         "3. Upload the documents and ask questions about uploaded PDF documents by selecting your respective document\n"
-         "4. For any queries feel free to reach out @desai.[email protected] or discord - shreyas094\n\n"
-         "To get started, upload some PDFs or ask me a question!")
-     ]
-
- def respond(message, history, use_web_search, selected_docs, model):
-     logging.info(f"User Query: {message}")
-     logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
-     logging.info(f"Selected Documents: {selected_docs}")
-     logging.info(f"Model Used: {model}")
-
-     try:
-         if use_web_search:
-             for main_content, sources in get_response_with_search(message):
-                 response = f"{main_content}\n\n{sources}"
-                 yield response
-         else:
-             embed = get_embeddings()
-             if os.path.exists("faiss_database"):
-                 database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
-                 retriever = database.as_retriever(search_kwargs={"k": 20})
-
-                 # Filter relevant documents based on user selection
-                 all_relevant_docs = retriever.get_relevant_documents(message)
-                 relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
-
-                 if not relevant_docs:
-                     yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
-                     return
-
-                 context_str = "\n".join([doc.page_content for doc in relevant_docs])
-             else:
-                 context_str = "No documents available."
-                 yield "No documents available. Please upload PDF documents to answer questions."
-                 return
-
-             for partial_response in get_response_from_duckduckgo(message, context_str, model):
-                 yield partial_response
-     except Exception as e:
-         logging.error(f"Error in responding: {str(e)}")
-         yield f"An error occurred: {str(e)}. Please try again later."
-
- def get_response_from_duckduckgo(query, context, model):
-     with DDGS() as ddgs:
-         try:
-             response = ddgs.chat(f"Using the following context:\n{context}\nRespond to the following query: {query}", model=model, timeout=30)
-             yield response
-         except Exception as e:
-             logging.error(f"Error in getting response from Duckduckgo: {str(e)}")
-             yield f"An error occurred: {str(e)}. Please try again later."
-
- def chatbot_interface(message, history, use_web_search, model, selected_docs):
-     if not message.strip():
-         return "", history
-
-     history = history + [(message, "")]
-
+ def get_response_from_pdf(query: str, selected_docs: List[str], num_calls: int = 3, temperature: float = 0.2) -> str:
+     logging.info(f"Entering get_response_from_pdf with query: {query}, selected_docs: {selected_docs}")
+
+     embed = get_embeddings()
+     if os.path.exists("faiss_database"):
+         logging.info("Loading FAISS database")
+         database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+     else:
+         logging.warning("No FAISS database found")
+         return "No documents available. Please upload PDF documents to answer questions."
+
+     # Pre-filter the documents
+     filtered_docs = [doc for doc_id, doc in database.docstore._dict.items() if isinstance(doc, Document) and doc.metadata.get("source") in selected_docs]
+     logging.info(f"Number of documents after pre-filtering: {len(filtered_docs)}")
+     if not filtered_docs:
+         logging.warning(f"No documents found for the selected sources: {selected_docs}")
+         return "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
+
+     # Create a new FAISS index with only the selected documents
+     filtered_db = FAISS.from_documents(filtered_docs, embed)
+     retriever = filtered_db.as_retriever(search_kwargs={"k": 10})
+     logging.info(f"Retrieving relevant documents for query: {query}")
+     relevant_docs = retriever.get_relevant_documents(query)
+     logging.info(f"Number of relevant documents retrieved: {len(relevant_docs)}")
+     for doc in relevant_docs:
+         logging.info(f"Document source: {doc.metadata['source']}")
+         logging.info(f"Document content preview: {doc.page_content[:100]}...")  # Log first 100 characters of each document
+
+     context_str = "\n".join([doc.page_content for doc in relevant_docs])
+     logging.info(f"Total context length: {len(context_str)}")
+
+     prompt = f"""Using the following context from the PDF documents:
+ {context_str}
+ Write a detailed and complete response that answers the following user question: '{query}'"""
+
+     response = ""
+     for i in range(num_calls):
+         logging.info(f"API call {i+1}/{num_calls}")
+         response += DDGS().chat(prompt, model="llama-3.1-70b", max_tokens=1024, temperature=temperature)
+
+     logging.info("Finished generating response")
+     return response
+
+ class ConversationManager:
+     def __init__(self):
+         self.history = []
+         self.current_context = None
+
+     def add_interaction(self, query, response):
+         self.history.append((query, response))
+         self.current_context = f"Previous query: {query}\nPrevious response summary: {response[:200]}..."
+
+     def get_context(self):
+         return self.current_context
+
+ def get_web_search_results(query: str, max_results: int = 10) -> List[Dict[str, str]]:
      try:
-         for response in respond(message, history, use_web_search, selected_docs, model):
-             history[-1] = (message, response)
-             yield history
-     except gr.CancelledError:
-         yield history
+         results = list(DDGS().text(query, max_results=max_results))
+         if not results:
+             print(f"No results found for query: {query}")
+         return results
      except Exception as e:
-         logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
-         history[-1] = (message, f"An unexpected error occurred: {str(e)}")
-         yield history
-
- def get_response_with_search(query):
-     search_results = duckduckgo_search(query)
-     web_search_database = create_web_search_vectors(search_results)
-
-     if not web_search_database:
-         yield "No web search results available. Please try again.", ""
-         return
-
-     retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
-     relevant_docs = retriever.get_relevant_documents(query)
-
-     context = "\n".join([doc.page_content for doc in relevant_docs])
-
-     for partial_response in get_response_from_duckduckgo(query, context, "gpt-4o-mini"):  # Use the default model for web search
-         yield partial_response, ""  # Yield streaming response without sources
-
- def create_web_search_vectors(search_results):
-     embed = get_embeddings()
-
-     documents = []
-     for result in search_results:
-         if 'body' in result:
-             content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
-             documents.append(Document(page_content=content, metadata={"source": result['href']}))
-
-     return FAISS.from_documents(documents, embed)
-
- def refresh_documents():
-     global uploaded_documents
-     uploaded_documents = load_documents()
-     return display_documents()
-
- DUCKDUCKGO_CHAT_MODELS = [
-     "gpt-4o-mini",
-     "claude-3-haiku",
-     "llama-3.1-70b",
-     "mixtral-8x7b"
- ]
-
- document_selector = gr.CheckboxGroup(label="Select documents to query")
- use_web_search = gr.Checkbox(label="Use Web Search", value=True)
- model_selector = gr.Dropdown(choices=DUCKDUCKGO_CHAT_MODELS, label="Select Duckduckgo Chat Model", value="gpt-4o-mini")
-
- custom_placeholder = "Ask a question (Note: You can toggle between Web Search and PDF Chat in Additional Inputs below)"
+         print(f"An error occurred during web search: {str(e)}")
+         return [{"error": f"An error occurred during web search: {str(e)}"}]
+
+ def rephrase_query(original_query: str, conversation_manager: ConversationManager) -> str:
+     context = conversation_manager.get_context()
+     if context:
+         prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
+
+ 1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
+ 2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
+ 3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
+ 4. Provide ONLY the rephrased query without any additional explanation or reasoning.
+
+ Context: {context}
+
+ New query: {original_query}
+
+ Rephrased query:"""
+         response = DDGS().chat(prompt, model="llama-3.1-70b")
+         # Extract only the rephrased query, removing any explanations
+         rephrased_query = response.split('\n')[0].strip()
+         return rephrased_query
+     return original_query
+
+ def summarize_results(query: str, search_results: List[Dict[str, str]], conversation_manager: ConversationManager) -> str:
+     try:
+         context = conversation_manager.get_context()
+         search_context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
+
+         prompt = f"""You are a highly intelligent & expert analyst and your job is to skillfully articulate the web search results about '{query}' and considering the context: {context},
+ You have to create a comprehensive news summary FOCUSING on the context provided to you.
+ Include key facts, relevant statistics, and expert opinions if available.
+ Ensure the article is well-structured with an introduction, main body, and conclusion, IF NECESSARY.
+ Address the query in the context of the ongoing conversation IF APPLICABLE.
+ Cite sources directly within the generated text and not at the end of the generated text, integrating URLs where appropriate to support the information provided:
+
+ {search_context}
+
+ Article:"""
+
+         summary = DDGS().chat(prompt, model="llama-3-70b")
+         return summary
+     except Exception as e:
+         return f"An error occurred during summarization: {str(e)}"
+
+ conversation_manager = ConversationManager()
+
+ def respond(message, chat_history, temperature, num_api_calls):
+     final_summary = ""
+     original_query = message
+     rephrased_query = rephrase_query(message, conversation_manager)
+
+     logging.info(f"Original query: {original_query}")
+     logging.info(f"Rephrased query: {rephrased_query}")
+
+     for _ in range(num_api_calls):
+         search_results = get_web_search_results(rephrased_query)
+
+         if not search_results:
+             final_summary += f"No search results found for the query: {rephrased_query}\n\n"
+         elif "error" in search_results[0]:
+             final_summary += search_results[0]["error"] + "\n\n"
+         else:
+             summary = summarize_results(rephrased_query, search_results, conversation_manager)
+             final_summary += summary + "\n\n"
+
+     if final_summary:
+         conversation_manager.add_interaction(original_query, final_summary)
+         return final_summary
+     else:
+         return "Unable to generate a response. Please try a different query."
+
+ # The rest of your code (CSS, theme, and Gradio interface setup) remains the same
  css = """
- /* Fine-tune chatbox size */
- .chatbot-container {
-     height: 600px !important;
-     width: 100% !important;
- }
- .chatbot-container > div {
-     height: 100%;
-     width: 100%;
- }
+ Your custom CSS here
  """

+ custom_placeholder = "Ask me anything about web content"
+
+ theme = gr.themes.Soft(
+     primary_hue="orange",
+     secondary_hue="amber",
+     neutral_hue="gray",
+     font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
+ ).set(
+     body_background_fill_dark="#0c0505",
+     block_background_fill_dark="#0c0505",
+     block_border_width="1px",
+     block_title_background_fill_dark="#1b0f0f",
+     input_background_fill_dark="#140b0b",
+     button_secondary_background_fill_dark="#140b0b",
+     border_color_accent_dark="#1b0f0f",
+     border_color_primary_dark="#1b0f0f",
+     background_fill_secondary_dark="#0c0505",
+     color_accent_soft_dark="transparent",
+     code_background_fill_dark="#140b0b"
+ )
+
  demo = gr.ChatInterface(
-     chatbot_interface,
+     respond,
      additional_inputs=[
-         use_web_search,
-         document_selector,
-         model_selector
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
+         gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
      ],
      title="AI-powered Web Search and PDF Chat Assistant",
-     description="Chat with your PDFs or use web search to answer questions. Toggle between Web Search and PDF Chat in Additional Inputs below.",
-     theme=gr.themes.Soft(
-         primary_hue="orange",
-         secondary_hue="amber",
-         neutral_hue="gray",
-         font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
-     ).set(
-         body_background_fill_dark="#0c0505",
-         block_background_fill_dark="#0c0505",
-         block_border_width="1px",
-         block_title_background_fill_dark="#1b0f0f",
-         input_background_fill_dark="#140b0b",
-         button_secondary_background_fill_dark="#140b0b",
-         border_color_accent_dark="#1b0f0f",
-         border_color_primary_dark="#1b0f0f",
-         background_fill_secondary_dark="#0c0505",
-         color_accent_soft_dark="transparent",
-         code_background_fill_dark="#140b0b"
-     ),
+     description="This AI-powered Web Search and PDF Chat Assistant combines real-time web search capabilities with advanced language processing.",
+     theme=theme,
      css=css,
      examples=[
-         ["Tell me about the contents of the uploaded PDFs."],
-         ["What are the main topics discussed in the documents?"],
-         ["Can you summarize the key points from the PDFs?"]
+         ["What is AI"],
+         ["Any recent news on US Banks"],
+         ["Who is Donald Trump"]
      ],
      cache_examples=False,
      analytics_enabled=False,
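
Note on the hunk above: the new get_response_from_pdf reaches into the vector store's raw docstore to keep only chunks from the selected sources, then builds a temporary index over just those chunks, instead of retrieving globally and discarding unselected hits as the old respond() did. A minimal standalone sketch of that pattern (not part of the commit; FakeEmbeddings stands in for the GIST model so it runs offline, and the file names are hypothetical):

```python
# Sketch of the docstore pre-filter pattern, assuming langchain-community and
# faiss-cpu are installed. FakeEmbeddings replaces HuggingFaceEmbeddings here.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

embed = FakeEmbeddings(size=128)
docs = [
    Document(page_content="Quarterly revenue rose 12%.", metadata={"source": "report.pdf"}),
    Document(page_content="The warranty covers two years.", metadata={"source": "manual.pdf"}),
]
database = FAISS.from_documents(docs, embed)

# Keep only chunks whose source is among the user's selected documents,
# then build a throwaway index over just those chunks.
selected = ["report.pdf"]
filtered = [d for d in database.docstore._dict.values()
            if isinstance(d, Document) and d.metadata.get("source") in selected]
filtered_db = FAISS.from_documents(filtered, embed)

hits = filtered_db.as_retriever(search_kwargs={"k": 1}).get_relevant_documents("revenue")
print(hits[0].metadata)  # only report.pdf chunks can be returned
```

Rebuilding a per-query index trades a little indexing time for exact source filtering, so a fixed k can never be exhausted by chunks from unselected documents.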
@@ -335,46 +324,7 @@ demo = gr.ChatInterface(
          likeable=True,
          layout="bubble",
          height=400,
-         value=initial_conversation()
      )
  )

- with demo:
-     gr.Markdown("## Upload and Manage PDF Documents")
-
-     with gr.Row():
-         file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
-         parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="llamaparse")
-         update_button = gr.Button("Upload Document")
-         refresh_button = gr.Button("Refresh Document List")
-
-     update_output = gr.Textbox(label="Update Status")
-     delete_button = gr.Button("Delete Selected Documents")
-
-     update_button.click(update_vectors,
-                         inputs=[file_input, parser_dropdown],
-                         outputs=[update_output, document_selector])
-
-     refresh_button.click(refresh_documents,
-                          inputs=[],
-                          outputs=[document_selector])
-
-     delete_button.click(delete_documents,
-                         inputs=[document_selector],
-                         outputs=[update_output, document_selector])
-
-     gr.Markdown(
-         """
-         ## How to use
-         1. Upload PDF documents using the file input at the top.
-         2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
-         3. Select the documents you want to query using the checkboxes.
-         4. Select the Duckduckgo Chat Model you want to use.
-         5. Ask questions in the chat interface.
-         6. Toggle "Use Web Search" to switch between PDF chat and web search.
-         7. Use the provided examples or ask your own questions.
-         """
-     )
-
- if __name__ == "__main__":
-     demo.launch(share=True)
+ demo.launch()
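
Taken together, the committed pipeline is rephrase → search → summarize → remember: rephrase_query rewrites follow-ups using the ConversationManager's stored context, the rewritten query drives the DuckDuckGo text search, and the summary is stored back as context for the next turn. A minimal sketch (not part of the commit) of exercising it outside Gradio; it assumes app.py's functions are importable without side effects (i.e. the module-level demo.launch() is guarded or commented out) and that DuckDuckGo is reachable:

```python
# Hypothetical driver for the new pipeline, run from the same directory as app.py.
from app import respond

# First turn: conversation_manager has no context yet, so rephrase_query
# returns the query unchanged before searching and summarizing.
print(respond("Any recent news on US Banks", chat_history=[], temperature=0.2, num_api_calls=1))

# Second turn: the first exchange is now stored, so the follow-up is rewritten
# into a self-contained search query before the web search runs.
print(respond("How are their stock prices reacting?", chat_history=[], temperature=0.2, num_api_calls=1))
```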