import os
import json
import re
import gradio as gr
import requests
from duckduckgo_search import DDGS
from typing import List
from pydantic import BaseModel, Field
from tempfile import NamedTemporaryFile
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from llama_parse import LlamaParse
from langchain_core.documents import Document
from huggingface_hub import InferenceClient
import logging
# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "microsoft/Phi-3-mini-4k-instruct"
]
# Initialize LlamaParse
llama_parser = LlamaParse(
    api_key=llama_cloud_api_key,
    result_type="markdown",
    num_workers=4,
    verbose=True,
    language="en",
)
def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
"""Loads and splits the document into pages."""
if parser == "pypdf":
loader = PyPDFLoader(file.name)
return loader.load_and_split()
elif parser == "llamaparse":
try:
documents = llama_parser.load_data(file.name)
return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
except Exception as e:
print(f"Error using Llama Parse: {str(e)}")
print("Falling back to PyPDF parser")
loader = PyPDFLoader(file.name)
return loader.load_and_split()
else:
raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
def get_embeddings():
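    """Returns the HuggingFace embedding model used for the FAISS vector store."""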
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
def update_vectors(files, parser):
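    """Parses the uploaded PDFs and adds their pages to the persistent FAISS store on disk."""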
    if not files:
        return "Please upload at least one PDF file."
    embed = get_embeddings()
    total_chunks = 0
    all_data = []
    for file in files:
        data = load_document(file, parser)
        all_data.extend(data)
        total_chunks += len(data)
    if os.path.exists("faiss_database"):
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
        database.add_documents(all_data)
    else:
        database = FAISS.from_documents(all_data, embed)
    database.save_local("faiss_database")
    return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, temperature=0.2, should_stop=False):
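    """Streams a chat completion over several API calls, then strips prompt echoes and
    deduplicates repeated paragraphs and sentences from the accumulated text."""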
print(f"Starting generate_chunked_response with {num_calls} calls")
client = InferenceClient(model, token=huggingface_token)
full_response = ""
messages = [{"role": "user", "content": prompt}]
for i in range(num_calls):
print(f"Starting API call {i+1}")
if should_stop:
print("Stop clicked, breaking loop")
break
try:
for message in client.chat_completion(
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
stream=True,
):
if should_stop:
print("Stop clicked during streaming, breaking")
break
if message.choices and message.choices[0].delta and message.choices[0].delta.content:
chunk = message.choices[0].delta.content
full_response += chunk
print(f"API call {i+1} completed")
except Exception as e:
print(f"Error in generating response: {str(e)}")
# Clean up the response
clean_response = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', full_response, flags=re.DOTALL)
clean_response = clean_response.replace("Using the following context:", "").strip()
clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()
# Remove duplicate paragraphs and sentences
paragraphs = clean_response.split('\n\n')
unique_paragraphs = []
for paragraph in paragraphs:
if paragraph not in unique_paragraphs:
sentences = paragraph.split('. ')
unique_sentences = []
for sentence in sentences:
if sentence not in unique_sentences:
unique_sentences.append(sentence)
unique_paragraphs.append('. '.join(unique_sentences))
final_response = '\n\n'.join(unique_paragraphs)
print(f"Final clean response: {final_response[:100]}...")
return final_response
def duckduckgo_search(query):
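    """Runs a DuckDuckGo text search and returns up to five results."""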
    with DDGS() as ddgs:
        results = ddgs.text(query, max_results=5)
        return results
class CitingSources(BaseModel):
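    """Schema for the list of sources cited in a generated response."""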
    sources: List[str] = Field(
        ...,
        description="List of sources to cite. Should be a URL of the source."
    )
def chatbot_interface(message, history, use_web_search, model, temperature, num_calls):
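    """Streams the assistant's answer into the chat history, with basic error handling."""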
    if not message.strip():
        return "", history
    history = history + [(message, "")]
    try:
        for response in respond(message, history, model, temperature, num_calls, use_web_search):
            history[-1] = (message, response)
            yield history
    except gr.CancelledError:
        yield history
    except Exception as e:
        logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
        history[-1] = (message, f"An unexpected error occurred: {str(e)}")
        yield history
def retry_last_response(history, use_web_search, model, temperature, num_calls):
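    """Re-submits the last user message after dropping the previous response from history."""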
    if not history:
        return history
    last_user_msg = history[-1][0]
    history = history[:-1]  # Remove the last response
    return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
def respond(message, history, model, temperature, num_calls, use_web_search):
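    """Routes the query to web search or PDF retrieval, streams the generated response,
    and falls back from Phi-3 to Mistral on error."""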
logging.info(f"User Query: {message}")
logging.info(f"Model Used: {model}")
logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
try:
if use_web_search:
for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
response = f"{main_content}\n\n{sources}"
logging.info(f"Generated Response (first line): {response.split('\n')[0]}")
yield response
else:
for partial_response in get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature):
logging.info(f"Generated Response (first line): {partial_response.split('\n')[0]}")
yield partial_response
except Exception as e:
logging.error(f"Error with {model}: {str(e)}")
if "microsoft/Phi-3-mini-4k-instruct" in model:
logging.info("Falling back to Mistral model due to Phi-3 error")
fallback_model = "mistralai/Mistral-7B-Instruct-v0.3"
yield from respond(message, history, fallback_model, temperature, num_calls, use_web_search)
else:
yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
def get_response_with_search(query, model, num_calls=3, temperature=0.2):
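    """Builds a prompt from DuckDuckGo results and streams a grounded answer from the model."""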
    search_results = duckduckgo_search(query)
    context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
                        for result in search_results if 'body' in result)
    prompt = f"""Using the following context:
{context}
Write a detailed and complete research document that fulfills the following user request: '{query}'
After writing the document, please provide a list of sources used in your response."""
    client = InferenceClient(model, token=huggingface_token)
    main_content = ""
    for i in range(num_calls):
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000,
            temperature=temperature,
            stream=True,
        ):
            if message.choices and message.choices[0].delta and message.choices[0].delta.content:
                chunk = message.choices[0].delta.content
                main_content += chunk
                yield main_content, ""  # Yield partial main content without sources
def get_response_from_pdf(query, model, num_calls=3, temperature=0.2):
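    """Retrieves relevant chunks from the FAISS store of uploaded PDFs and streams an answer."""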
    embed = get_embeddings()
    if os.path.exists("faiss_database"):
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    else:
        yield "No documents available. Please upload PDF documents to answer questions."
        return
    retriever = database.as_retriever()
    relevant_docs = retriever.get_relevant_documents(query)
    context_str = "\n".join([doc.page_content for doc in relevant_docs])
    prompt = f"""Using the following context from the PDF documents:
{context_str}
Write a detailed and complete response that answers the following user question: '{query}'"""
    client = InferenceClient(model, token=huggingface_token)
    response = ""
    for i in range(num_calls):
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000,
            temperature=temperature,
            stream=True,
        ):
            if message.choices and message.choices[0].delta and message.choices[0].delta.content:
                chunk = message.choices[0].delta.content
                response += chunk
                yield response  # Yield partial response
def vote(data: gr.LikeData):
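    """Logs thumbs-up/thumbs-down feedback from the chat interface."""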
    if data.liked:
        print(f"You upvoted this response: {data.value}")
    else:
        print(f"You downvoted this response: {data.value}")
css = """
/* Add your custom CSS here */
"""
# Define the checkbox outside the demo block
use_web_search = gr.Checkbox(label="Use Web Search", value=False)
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
        use_web_search  # Checkbox defined above
    ],
    title="AI-powered Web Search and PDF Chat Assistant",
    description="Chat with your PDFs or use web search to answer questions.",
    theme=gr.themes.Soft(
        primary_hue="orange",
        secondary_hue="amber",
        neutral_hue="gray",
        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
    ).set(
        body_background_fill_dark="#0c0505",
        block_background_fill_dark="#0c0505",
        block_border_width="1px",
        block_title_background_fill_dark="#1b0f0f",
        input_background_fill_dark="#140b0b",
        button_secondary_background_fill_dark="#140b0b",
        border_color_accent_dark="#1b0f0f",
        border_color_primary_dark="#1b0f0f",
        background_fill_secondary_dark="#0c0505",
        color_accent_soft_dark="transparent",
        code_background_fill_dark="#140b0b"
    ),
    css=css,
    examples=[
        ["Tell me about the contents of the uploaded PDFs."],
        ["What are the main topics discussed in the documents?"],
        ["Can you summarize the key points from the PDFs?"]
    ],
    cache_examples=False,
    analytics_enabled=False,
)
# Add file upload functionality
with demo:
gr.Markdown("## Upload PDF Documents")
with gr.Row():
file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="llamaparse")
update_button = gr.Button("Upload Document")
update_output = gr.Textbox(label="Update Status")
update_button.click(update_vectors, inputs=[file_input, parser_dropdown], outputs=update_output)
gr.Markdown(
"""
## How to use
1. Upload PDF documents using the file input at the top.
2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
3. Ask questions in the chat interface.
4. Toggle "Use Web Search" to switch between PDF chat and web search.
5. Adjust Temperature and Number of API Calls to fine-tune the response generation.
6. Use the provided examples or ask your own questions.
"""
)
if __name__ == "__main__":
    demo.launch(share=True)