Update app.py
app.py
CHANGED
@@ -1,13 +1,57 @@
import gradio as gr
from duckduckgo_search import DDGS
-from typing import List
-import
import logging

-logging

# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")

def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
    """Loads and splits the document into pages."""
@@ -141,30 +185,349 @@ def delete_documents(selected_docs):

    return f"Deleted documents: {', '.join(deleted_docs)}", display_documents()

-def get_response_from_pdf(query: str, selected_docs: List[str], num_calls: int =
-

    embed = get_embeddings()
    if os.path.exists("faiss_database"):
        logging.info("Loading FAISS database")
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    else:
        logging.warning("No FAISS database found")
-

    # Pre-filter the documents
-    filtered_docs = [
    logging.info(f"Number of documents after pre-filtering: {len(filtered_docs)}")
    if not filtered_docs:
        logging.warning(f"No documents found for the selected sources: {selected_docs}")
-

    # Create a new FAISS index with only the selected documents
    filtered_db = FAISS.from_documents(filtered_docs, embed)
    retriever = filtered_db.as_retriever(search_kwargs={"k": 10})
    logging.info(f"Retrieving relevant documents for query: {query}")
    relevant_docs = retriever.get_relevant_documents(query)
    logging.info(f"Number of relevant documents retrieved: {len(relevant_docs)}")
    for doc in relevant_docs:
        logging.info(f"Document source: {doc.metadata['source']}")
        logging.info(f"Document content preview: {doc.page_content[:100]}...")  # Log first 100 characters of each document
@@ -172,159 +535,172 @@ def get_response_from_pdf(query: str, selected_docs: List[str], num_calls: int =
    context_str = "\n".join([doc.page_content for doc in relevant_docs])
    logging.info(f"Total context length: {len(context_str)}")

-    prompt = f"""Using the following context from the PDF documents:
{context_str}
Write a detailed and complete response that answers the following user question: '{query}'"""
-
-    response = ""
-    for i in range(num_calls):
-        logging.info(f"API call {i+1}/{num_calls}")
-        response += DDGS().chat(prompt, model="llama-3.1-70b", max_tokens=1024, temperature=temperature)
-
-    logging.info("Finished generating response")
-    return response
-
-class ConversationManager:
-    def __init__(self):
-        self.history = []
-        self.current_context = None
-
-    def add_interaction(self, query, response):
-        self.history.append((query, response))
-        self.current_context = f"Previous query: {query}\nPrevious response summary: {response[:200]}..."
-
-    def get_context(self):
-        return self.current_context
-
-def get_web_search_results(query: str, max_results: int = 10) -> List[Dict[str, str]]:
-    try:
-        results = list(DDGS().text(query, max_results=max_results))
-        if not results:
-            print(f"No results found for query: {query}")
-        return results
-    except Exception as e:
-        print(f"An error occurred during web search: {str(e)}")
-        return [{"error": f"An error occurred during web search: {str(e)}"}]
-
-def rephrase_query(original_query: str, conversation_manager: ConversationManager) -> str:
-    context = conversation_manager.get_context()
-    if context:
-        prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
-
-1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
-2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
-3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
-4. Provide ONLY the rephrased query without any additional explanation or reasoning.

-

-

-
-        response = DDGS().chat(prompt, model="llama-3.1-70b")
-        # Extract only the rephrased query, removing any explanations
-        rephrased_query = response.split('\n')[0].strip()
-        return rephrased_query
-    return original_query
-
-def summarize_results(query: str, search_results: List[Dict[str, str]], conversation_manager: ConversationManager) -> str:
-    try:
-        context = conversation_manager.get_context()
-        search_context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
-
-        prompt = f"""You are a highly intelligent & expert analyst and your job is to skillfully articulate the web search results about '{query}' and considering the context: {context},
-You have to create a comprehensive news summary FOCUSING on the context provided to you.
-Include key facts, relevant statistics, and expert opinions if available.
-Ensure the article is well-structured with an introduction, main body, and conclusion, IF NECESSARY.
-Address the query in the context of the ongoing conversation IF APPLICABLE.
-Cite sources directly within the generated text and not at the end of the generated text, integrating URLs where appropriate to support the information provided:

-
-
-
-
-
-        return summary
-    except Exception as e:
-        return f"An error occurred during summarization: {str(e)}"

-

-
-    final_summary = ""
-    original_query = message
-    rephrased_query = rephrase_query(message, conversation_manager)

-
-

-
-

-
-
-    elif "error" in search_results[0]:
-        final_summary += search_results[0]["error"] + "\n\n"
-    else:
-        summary = summarize_results(rephrased_query, search_results, conversation_manager)
-        final_summary += summary + "\n\n"

-
-        conversation_manager.add_interaction(original_query, final_summary)
-        return final_summary
-    else:
-        return "Unable to generate a response. Please try a different query."

-
-css = """
-Your custom CSS here
-"""
-
-custom_placeholder = "Ask me anything about web content"
-
-theme = gr.themes.Soft(
-    primary_hue="orange",
-    secondary_hue="amber",
-    neutral_hue="gray",
-    font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
-).set(
-    body_background_fill_dark="#0c0505",
-    block_background_fill_dark="#0c0505",
-    block_border_width="1px",
-    block_title_background_fill_dark="#1b0f0f",
-    input_background_fill_dark="#140b0b",
-    button_secondary_background_fill_dark="#140b0b",
-    border_color_accent_dark="#1b0f0f",
-    border_color_primary_dark="#1b0f0f",
-    background_fill_secondary_dark="#0c0505",
-    color_accent_soft_dark="transparent",
-    code_background_fill_dark="#140b0b"
-)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
-        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
    ],
    title="AI-powered Web Search and PDF Chat Assistant",
-    description="
-    theme=
    css=css,
    examples=[
-        ["
-        ["
-        ["
    ],
    cache_examples=False,
    analytics_enabled=False,
    textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
-    chatbot=gr.Chatbot(
-
-
-
-
-    )
)

-
+import os
+import json
+import re
import gradio as gr
+import requests
from duckduckgo_search import DDGS
+from typing import List
+from pydantic import BaseModel, Field
+from tempfile import NamedTemporaryFile
+from langchain_community.vectorstores import FAISS
+from langchain_core.vectorstores import VectorStore
+from langchain_core.documents import Document
+from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from llama_parse import LlamaParse
+from huggingface_hub import InferenceClient
+import inspect
import logging
+import shutil
+

+# Set up basic configuration for logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
+llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+ACCOUNT_ID = os.environ.get("CLOUDFARE_ACCOUNT_ID")
+API_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
+API_BASE_URL = "https://api.cloudflare.com/client/v4/accounts/a17f03e0f049ccae0c15cdcf3b9737ce/ai/run/"
+
+print(f"ACCOUNT_ID: {ACCOUNT_ID}")
+print(f"CLOUDFLARE_AUTH_TOKEN: {API_TOKEN[:5]}..." if API_TOKEN else "Not set")
+
+MODELS = [
+    "mistralai/Mistral-7B-Instruct-v0.3",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "@cf/meta/llama-3.1-8b-instruct",
+    "mistralai/Mistral-Nemo-Instruct-2407",
+    "gpt-4o-mini",
+    "claude-3-haiku",
+    "llama-3.1-70b",
+    "mixtral-8x7b"
+]
+
+# Initialize LlamaParse
+llama_parser = LlamaParse(
+    api_key=llama_cloud_api_key,
+    result_type="markdown",
+    num_workers=4,
+    verbose=True,
+    language="en",
+)

def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
    """Loads and splits the document into pages."""

    return f"Deleted documents: {', '.join(deleted_docs)}", display_documents()

+def generate_chunked_response(prompt, model, max_tokens=10000, num_calls=3, temperature=0.2, should_stop=False):
+    print(f"Starting generate_chunked_response with {num_calls} calls")
+    full_response = ""
+    messages = [{"role": "user", "content": prompt}]
+
+    if model == "@cf/meta/llama-3.1-8b-instruct":
+        # Cloudflare API
+        for i in range(num_calls):
+            print(f"Starting Cloudflare API call {i+1}")
+            if should_stop:
+                print("Stop clicked, breaking loop")
+                break
+            try:
+                response = requests.post(
+                    f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/@cf/meta/llama-3.1-8b-instruct",
+                    headers={"Authorization": f"Bearer {API_TOKEN}"},
+                    json={
+                        "stream": True,
+                        "messages": [
+                            {"role": "system", "content": "You are a friendly assistant"},
+                            {"role": "user", "content": prompt}
+                        ],
+                        "max_tokens": max_tokens,
+                        "temperature": temperature
+                    },
+                    stream=True
+                )
+
+                for line in response.iter_lines():
+                    if should_stop:
+                        print("Stop clicked during streaming, breaking")
+                        break
+                    if line:
+                        try:
+                            json_data = json.loads(line.decode('utf-8').split('data: ')[1])
+                            chunk = json_data['response']
+                            full_response += chunk
+                        except (json.JSONDecodeError, IndexError):
+                            continue
+                print(f"Cloudflare API call {i+1} completed")
+            except Exception as e:
+                print(f"Error in generating response from Cloudflare: {str(e)}")
+    else:
+        # Original Hugging Face API logic
+        client = InferenceClient(model, token=huggingface_token)
+
+        for i in range(num_calls):
+            print(f"Starting Hugging Face API call {i+1}")
+            if should_stop:
+                print("Stop clicked, breaking loop")
+                break
+            try:
+                for message in client.chat_completion(
+                    messages=messages,
+                    max_tokens=max_tokens,
+                    temperature=temperature,
+                    stream=True,
+                ):
+                    if should_stop:
+                        print("Stop clicked during streaming, breaking")
+                        break
+                    if message.choices and message.choices[0].delta and message.choices[0].delta.content:
+                        chunk = message.choices[0].delta.content
+                        full_response += chunk
+                print(f"Hugging Face API call {i+1} completed")
+            except Exception as e:
+                print(f"Error in generating response from Hugging Face: {str(e)}")
+
+    # Clean up the response
+    clean_response = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', full_response, flags=re.DOTALL)
+    clean_response = clean_response.replace("Using the following context:", "").strip()
+    clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()
+
+    # Remove duplicate paragraphs and sentences
+    paragraphs = clean_response.split('\n\n')
+    unique_paragraphs = []
+    for paragraph in paragraphs:
+        if paragraph not in unique_paragraphs:
+            sentences = paragraph.split('. ')
+            unique_sentences = []
+            for sentence in sentences:
+                if sentence not in unique_sentences:
+                    unique_sentences.append(sentence)
+            unique_paragraphs.append('. '.join(unique_sentences))
+
+    final_response = '\n\n'.join(unique_paragraphs)
+
+    print(f"Final clean response: {final_response[:100]}...")
+    return final_response
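A quick smoke test for the helper above; this is an illustrative sketch, not part of the Space, and it assumes HUGGINGFACE_TOKEN is set and the chosen model is reachable:

```python
# Hypothetical one-off call; exercises the non-Cloudflare (Hugging Face) branch.
text = generate_chunked_response(
    prompt="In one sentence, what is a vector store?",
    model="mistralai/Mistral-7B-Instruct-v0.3",
    max_tokens=256,
    num_calls=1,
)
print(text)
```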
+
+def duckduckgo_search(query):
+    with DDGS() as ddgs:
+        results = ddgs.text(query, max_results=5)
+        return results
+
+def duckduckgo_chat(keywords, model, timeout=30):
+    with DDGS() as ddgs:
+        response = ddgs.chat(keywords, model=model, timeout=timeout)
+        return response
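For reference, a sketch of how these wrappers are consumed. The result keys (title, body, href) are inferred from create_web_search_vectors further down, so treat the exact payload shape as an assumption about the duckduckgo_search library:

```python
# Hypothetical usage of the two wrappers above.
hits = duckduckgo_search("FAISS similarity search")
for hit in hits:
    print(hit["title"], hit["href"])  # each hit also carries a "body" snippet

answer = duckduckgo_chat("What is FAISS?", model="llama-3.1-70b")
print(answer)
```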
+
+class CitingSources(BaseModel):
+    sources: List[str] = Field(
+        ...,
+        description="List of sources to cite. Should be a URL of the source."
+    )
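CitingSources is declared but not referenced anywhere else in this diff; a hedged sketch of how a model like this could validate a parsed source list:

```python
# Illustrative only; the Space does not currently instantiate CitingSources.
cited = CitingSources(sources=["https://example.com/article"])
print(cited.sources)

try:
    CitingSources(sources=123)  # wrong type is rejected by pydantic
except Exception as err:
    print(f"Validation failed: {err}")
```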
+
+def chatbot_interface(message, history, use_web_search, model, temperature, num_calls):
+    if not message.strip():
+        return "", history
+
+    history = history + [(message, "")]
+
+    try:
+        for response in respond(message, history, model, temperature, num_calls, use_web_search):
+            history[-1] = (message, response)
+            yield history
+    except gr.CancelledError:
+        yield history
+    except Exception as e:
+        logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
+        history[-1] = (message, f"An unexpected error occurred: {str(e)}")
+        yield history
+
+def retry_last_response(history, use_web_search, model, temperature, num_calls):
+    if not history:
+        return history
+
+    last_user_msg = history[-1][0]
+    history = history[:-1]  # Remove the last response
+
+    return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
+
+def respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
+    logging.info(f"User Query: {message}")
+    logging.info(f"Model Used: {model}")
+    logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
+    logging.info(f"Selected Documents: {selected_docs}")
+
+    try:
+        if use_web_search:
+            for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
+                response = f"{main_content}\n\n{sources}"
+                first_line = response.split('\n')[0] if response else ''
+                logging.info(f"Generated Response (first line): {first_line}")
+                yield response
+        else:
+            embed = get_embeddings()
+            if os.path.exists("faiss_database"):
+                database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+                retriever = database.as_retriever(search_kwargs={"k": 20})
+
+                # Filter relevant documents based on user selection
+                all_relevant_docs = retriever.get_relevant_documents(message)
+                relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
+
+                if not relevant_docs:
+                    yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
+                    return
+
+                context_str = "\n".join([doc.page_content for doc in relevant_docs])
+            else:
+                context_str = "No documents available."
+                yield "No documents available. Please upload PDF documents to answer questions."
+                return
+
+            if model == "@cf/meta/llama-3.1-8b-instruct":
+                # Use Cloudflare API
+                for partial_response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
+                    first_line = partial_response.split('\n')[0] if partial_response else ''
+                    logging.info(f"Generated Response (first line): {first_line}")
+                    yield partial_response
+            elif model in ["gpt-4o-mini", "claude-3-haiku", "llama-3.1-70b", "mixtral-8x7b"]:
+                # Use DuckDuckGo Chat API
+                for partial_response in get_response_from_duckduckgo(message, model, num_calls=num_calls, temperature=temperature):
+                    first_line = partial_response.split('\n')[0] if partial_response else ''
+                    logging.info(f"Generated Response (first line): {first_line}")
+                    yield partial_response
+            else:
+                # Use Hugging Face API
+                for partial_response in get_response_from_pdf(message, model, selected_docs, num_calls=num_calls, temperature=temperature):
+                    first_line = partial_response.split('\n')[0] if partial_response else ''
+                    logging.info(f"Generated Response (first line): {first_line}")
+                    yield partial_response
+    except Exception as e:
+        logging.error(f"Error with {model}: {str(e)}")
+        if "microsoft/Phi-3-mini-4k-instruct" in model:
+            logging.info("Falling back to Mistral model due to Phi-3 error")
+            fallback_model = "mistralai/Mistral-7B-Instruct-v0.3"
+            yield from respond(message, history, fallback_model, temperature, num_calls, use_web_search, selected_docs)
+        else:
+            yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
+
+logging.basicConfig(level=logging.DEBUG)
+
+def get_response_from_duckduckgo(message, model, num_calls=3, temperature=0.2):
+    for i in range(num_calls):
+        try:
+            response = duckduckgo_chat(message, model=model, timeout=30)
+            yield response
+        except Exception as e:
+            logging.error(f"Error in generating response from DuckDuckGo: {str(e)}")
+            yield f"I apologize, but an error occurred: {str(e)}. Please try again later."
+
+def get_response_from_cloudflare(prompt, context, query, num_calls=3, temperature=0.2, search_type="pdf"):
+    headers = {
+        "Authorization": f"Bearer {API_TOKEN}",
+        "Content-Type": "application/json"
+    }
+    model = "@cf/meta/llama-3.1-8b-instruct"
+
+    if search_type == "pdf":
+        instruction = f"""Using the following context from the PDF documents:
+{context}
+Write a detailed and complete response that answers the following user question: '{query}'"""
+    else:  # web search
+        instruction = f"""Using the following context:
+{context}
+Write a detailed and complete research document that fulfills the following user request: '{query}'
+After writing the document, please provide a list of sources used in your response."""
+
+    inputs = [
+        {"role": "system", "content": instruction},
+        {"role": "user", "content": query}
+    ]
+
+    payload = {
+        "messages": inputs,
+        "stream": True,
+        "temperature": temperature,
+        "max_tokens": 32000
+    }
+
+    full_response = ""
+    for i in range(num_calls):
+        try:
+            with requests.post(f"{API_BASE_URL}{model}", headers=headers, json=payload, stream=True) as response:
+                if response.status_code == 200:
+                    for line in response.iter_lines():
+                        if line:
+                            try:
+                                json_response = json.loads(line.decode('utf-8').split('data: ')[1])
+                                if 'response' in json_response:
+                                    chunk = json_response['response']
+                                    full_response += chunk
+                                    yield full_response
+                            except (json.JSONDecodeError, IndexError) as e:
+                                logging.error(f"Error parsing streaming response: {str(e)}")
+                                continue
+                else:
+                    logging.error(f"HTTP Error: {response.status_code}, Response: {response.text}")
+                    yield f"I apologize, but I encountered an HTTP error: {response.status_code}. Please try again later."
+        except Exception as e:
+            logging.error(f"Error in generating response from Cloudflare: {str(e)}")
+            yield f"I apologize, but an error occurred: {str(e)}. Please try again later."
+
+    if not full_response:
+        yield "I apologize, but I couldn't generate a response at this time. Please try again later."
+
+def create_web_search_vectors(search_results):
+    embed = get_embeddings()
+
+    documents = []
+    for result in search_results:
+        if 'body' in result:
+            content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
+            documents.append(Document(page_content=content, metadata={"source": result['href']}))
+
+    return FAISS.from_documents(documents, embed)
+
+def get_response_with_search(query, model, num_calls=3, temperature=0.2):
+    search_results = duckduckgo_search(query)
+    web_search_database = create_web_search_vectors(search_results)
+
+    if not web_search_database:
+        yield "No web search results available. Please try again.", ""
+        return
+
+    retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
+    relevant_docs = retriever.get_relevant_documents(query)
+
+    context = "\n".join([doc.page_content for doc in relevant_docs])
+
+    prompt = f"""Using the following context from web search results:
+{context}
+You are an expert AI assistant, write a detailed and complete research document that fulfills the following user request: '{query}'
+Base your entire response strictly on the information retrieved from trusted sources. Importantly, only include information that is directly supported by the retrieved content.
+If any part of the information cannot be verified from the given sources, clearly state that it could not be confirmed.
+After writing the document, please provide a list of sources used in your response."""
+
+    if model == "@cf/meta/llama-3.1-8b-instruct":
+        # Use Cloudflare API
+        for response in get_response_from_cloudflare(prompt="", context=context, query=query, num_calls=num_calls, temperature=temperature, search_type="web"):
+            yield response, ""  # Yield streaming response without sources
+    else:
+        # Use Hugging Face API
+        client = InferenceClient(model, token=huggingface_token)
+
+        main_content = ""
+        for i in range(num_calls):
+            for message in client.chat_completion(
+                messages=[{"role": "user", "content": prompt}],
+                max_tokens=10000,
+                temperature=temperature,
+                stream=True,
+            ):
+                if message.choices and message.choices[0].delta and message.choices[0].delta.content:
+                    chunk = message.choices[0].delta.content
+                    main_content += chunk
+                    yield main_content, ""  # Yield partial main content without sources
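Both branches above yield (text, sources) tuples, which is why respond unpacks two values per chunk. A small sketch of draining the generator, assuming network access and the Space's embedding setup:

```python
# Illustrative consumption; each yield carries the cumulative text so far.
final_text = ""
for text, sources in get_response_with_search("what is RAG?", MODELS[0], num_calls=1):
    final_text = text
print(final_text)
```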
+
+def get_response_from_pdf(query, model, selected_docs, num_calls=3, temperature=0.2):
+    logging.info(f"Entering get_response_from_pdf with query: {query}, model: {model}, selected_docs: {selected_docs}")
+
    embed = get_embeddings()
    if os.path.exists("faiss_database"):
        logging.info("Loading FAISS database")
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    else:
        logging.warning("No FAISS database found")
+        yield "No documents available. Please upload PDF documents to answer questions."
+        return

    # Pre-filter the documents
+    filtered_docs = []
+    for doc_id, doc in database.docstore._dict.items():
+        if isinstance(doc, Document) and doc.metadata.get("source") in selected_docs:
+            filtered_docs.append(doc)
+
    logging.info(f"Number of documents after pre-filtering: {len(filtered_docs)}")
+
    if not filtered_docs:
        logging.warning(f"No documents found for the selected sources: {selected_docs}")
+        yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
+        return

    # Create a new FAISS index with only the selected documents
    filtered_db = FAISS.from_documents(filtered_docs, embed)
+
    retriever = filtered_db.as_retriever(search_kwargs={"k": 10})
    logging.info(f"Retrieving relevant documents for query: {query}")
    relevant_docs = retriever.get_relevant_documents(query)
    logging.info(f"Number of relevant documents retrieved: {len(relevant_docs)}")
+
    for doc in relevant_docs:
        logging.info(f"Document source: {doc.metadata['source']}")
        logging.info(f"Document content preview: {doc.page_content[:100]}...")  # Log first 100 characters of each document

    context_str = "\n".join([doc.page_content for doc in relevant_docs])
    logging.info(f"Total context length: {len(context_str)}")

+    if model == "@cf/meta/llama-3.1-8b-instruct":
+        logging.info("Using Cloudflare API")
+        # Use Cloudflare API with the retrieved context
+        for response in get_response_from_cloudflare(prompt="", context=context_str, query=query, num_calls=num_calls, temperature=temperature, search_type="pdf"):
+            yield response
+    else:
+        logging.info("Using Hugging Face API")
+        # Use Hugging Face API
+        prompt = f"""Using the following context from the PDF documents:
{context_str}
Write a detailed and complete response that answers the following user question: '{query}'"""

+        client = InferenceClient(model, token=huggingface_token)

+        response = ""
+        for i in range(num_calls):
+            logging.info(f"API call {i+1}/{num_calls}")
+            for message in client.chat_completion(
+                messages=[{"role": "user", "content": prompt}],
+                max_tokens=10000,
+                temperature=temperature,
+                stream=True,
+            ):
+                if message.choices and message.choices[0].delta and message.choices[0].delta.content:
+                    chunk = message.choices[0].delta.content
+                    response += chunk
+                    yield response  # Yield partial response

+        logging.info("Finished generating response")

+def vote(data: gr.LikeData):
+    if data.liked:
+        print(f"You upvoted this response: {data.value}")
+    else:
+        print(f"You downvoted this response: {data.value}")

+css = """
+/* Fine-tune chatbox size */
+.chatbot-container {
+    height: 600px !important;
+    width: 100% !important;
+}
+.chatbot-container > div {
+    height: 100%;
+    width: 100%;
+}
+"""

+uploaded_documents = []

+def display_documents():
+    return gr.CheckboxGroup(
+        choices=[doc["name"] for doc in uploaded_documents],
+        value=[doc["name"] for doc in uploaded_documents if doc["selected"]],
+        label="Select documents to query or delete"
+    )
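The comprehensions in display_documents imply that each uploaded_documents entry is a dict with at least name and selected keys; an assumed illustration:

```python
# Assumed record shape, inferred from display_documents above.
uploaded_documents.append({"name": "report.pdf", "selected": True})
checkbox_group = display_documents()  # offers "report.pdf", pre-checked
```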

+def initial_conversation():
+    return [
+        (None, "Welcome! I'm your AI assistant for web search and PDF analysis. Here's how you can use me:\n\n"
+               "1. Toggle between Web Search and PDF Search using the checkbox in the Additional Inputs dropdown below\n"
+               "2. Use web search to find information\n"
+               "3. Upload PDF documents, select them, and ask questions about their contents\n"
+               "4. For any queries, feel free to reach out @[email protected] or on Discord: shreyas094\n\n"
+               "To get started, upload some PDFs or ask me a question!")
+    ]
+
+# Add this new function
+def refresh_documents():
+    global uploaded_documents
+    uploaded_documents = load_documents()
+    return display_documents()
+
+# Define the checkbox outside the demo block
+document_selector = gr.CheckboxGroup(label="Select documents to query")
+
+use_web_search = gr.Checkbox(label="Use Web Search", value=True)
+
+custom_placeholder = "Ask a question (Note: You can toggle between Web Search and PDF Chat in Additional Inputs below)"

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
+        gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
+        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
+        use_web_search,
+        document_selector
    ],
    title="AI-powered Web Search and PDF Chat Assistant",
+    description="Chat with your PDFs or use web search to answer questions. Toggle between Web Search and PDF Chat in Additional Inputs below.",
+    theme=gr.themes.Soft(
+        primary_hue="orange",
+        secondary_hue="amber",
+        neutral_hue="gray",
+        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
+    ).set(
+        body_background_fill_dark="#0c0505",
+        block_background_fill_dark="#0c0505",
+        block_border_width="1px",
+        block_title_background_fill_dark="#1b0f0f",
+        input_background_fill_dark="#140b0b",
+        button_secondary_background_fill_dark="#140b0b",
+        border_color_accent_dark="#1b0f0f",
+        border_color_primary_dark="#1b0f0f",
+        background_fill_secondary_dark="#0c0505",
+        color_accent_soft_dark="transparent",
+        code_background_fill_dark="#140b0b"
+    ),
    css=css,
    examples=[
+        ["Tell me about the contents of the uploaded PDFs."],
+        ["What are the main topics discussed in the documents?"],
+        ["Can you summarize the key points from the PDFs?"]
    ],
    cache_examples=False,
    analytics_enabled=False,
    textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
+    chatbot=gr.Chatbot(
+        show_copy_button=True,
+        likeable=True,
+        layout="bubble",
+        height=400,
+        value=initial_conversation()
    )
+)
+
+# Add file upload functionality
+with demo:
+    gr.Markdown("## Upload and Manage PDF Documents")
+
+    with gr.Row():
+        file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
+        parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="llamaparse")
+        update_button = gr.Button("Upload Document")
+        refresh_button = gr.Button("Refresh Document List")
+
+    update_output = gr.Textbox(label="Update Status")
+    delete_button = gr.Button("Delete Selected Documents")
+
+    # Update both the output text and the document selector
+    update_button.click(update_vectors,
+                        inputs=[file_input, parser_dropdown],
+                        outputs=[update_output, document_selector])
+
+    # Add the refresh button functionality
+    refresh_button.click(refresh_documents,
+                         inputs=[],
+                         outputs=[document_selector])
+
+    # Add the delete button functionality
+    delete_button.click(delete_documents,
+                        inputs=[document_selector],
+                        outputs=[update_output, document_selector])
+
+    gr.Markdown(
+    """
+    ## How to use
+    1. Upload PDF documents using the file input at the top.
+    2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
+    3. Select the documents you want to query using the checkboxes.
+    4. Ask questions in the chat interface.
+    5. Toggle "Use Web Search" to switch between PDF chat and web search.
+    6. Adjust Temperature and Number of API Calls to fine-tune the response generation.
+    7. Use the provided examples or ask your own questions.
+    """
+    )

+if __name__ == "__main__":
+    demo.launch(share=True)