import os
import json
import re
import shutil
import gradio as gr
import requests
import random
import urllib.parse
import spacy
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from typing import List
from tempfile import NamedTemporaryFile
from bs4 import BeautifulSoup
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceHub
from langchain_core.documents import Document
from sentence_transformers import SentenceTransformer
from llama_parse import LlamaParse
from llama_cpp_agent.llm_agent import LlamaCppAgent
from llama_cpp_agent.messages_formatter import MessagesFormatterType
from llama_cpp_agent.providers.llama_cpp_endpoint_provider import LlamaCppEndpointSettings
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")

# Load SentenceTransformer model
sentence_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
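# Note: 'paraphrase-MiniLM-L6-v2' produces 384-dimensional sentence embeddings.
# It is used throughout this file for follow-up detection, chunk ranking, and
# document reranking, independently of the HuggingFaceEmbeddings model that
# backs the FAISS vector store below.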
def load_spacy_model():
    try:
        # Try to load the model
        return spacy.load("en_core_web_sm")
    except OSError:
        # If the model is missing, download it via spaCy's own CLI helper
        # (more robust than shelling out to "python -m spacy download")
        spacy.cli.download("en_core_web_sm")
        # Try loading again
        return spacy.load("en_core_web_sm")

# Load spaCy model
nlp = load_spacy_model()
class EnhancedContextDrivenChatbot:
    def __init__(self, history_size: int = 10, max_history_chars: int = 5000):
        self.history = []
        self.history_size = history_size
        self.max_history_chars = max_history_chars
        self.entity_tracker = {}
        self.conversation_context = ""
        self.model = None
        self.last_instructions = None
    def add_to_history(self, text: str):
        self.history.append(text)
        # Trim oldest turns until both the character and turn-count budgets hold
        while len(' '.join(self.history)) > self.max_history_chars or len(self.history) > self.history_size:
            self.history.pop(0)

        # Update entity tracker
        doc = nlp(text)
        for ent in doc.ents:
            if ent.label_ not in self.entity_tracker:
                self.entity_tracker[ent.label_] = set()
            self.entity_tracker[ent.label_].add(ent.text)

        # Update conversation context
        self.conversation_context += f" {text}"
        self.conversation_context = ' '.join(self.conversation_context.split()[-100:])  # Keep last 100 words
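
    # Behavior sketch (hypothetical values): with history_size=2, three
    # successive add_to_history calls leave only the last two turns in
    # self.history, while entities extracted from all three turns persist in
    # self.entity_tracker. The two stores age out at different rates by design.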
    def get_context(self):
        return self.conversation_context

    def is_follow_up_question(self, question):
        doc = nlp(question.lower())
        follow_up_indicators = set(['it', 'this', 'that', 'these', 'those', 'he', 'she', 'they', 'them'])
        # Compare case-insensitively, matching the lowercased doc above
        return any(token.text in follow_up_indicators for token in doc) or question.strip().lower().startswith("what about")

    def extract_topics(self, text):
        doc = nlp(text)
        return [chunk.text for chunk in doc.noun_chunks]
    def extract_instructions(self, text):
        instruction_patterns = [
            r"(.*?),?\s*(?:please\s+)?(provide\s+(?:me\s+)?a\s+.*?|give\s+(?:me\s+)?a\s+.*?|create\s+a\s+.*?)$",
            r"(.*?),?\s*(?:please\s+)?(summarize|analyze|explain|describe|elaborate\s+on).*$",
            r"(.*?),?\s*(?:please\s+)?(in\s+detail|briefly|concisely).*$",
        ]
        for pattern in instruction_patterns:
            match = re.match(pattern, text, re.IGNORECASE)
            if match:
                return match.group(1).strip(), match.group(2).strip()
        return text, None
    def get_most_relevant_context(self, question):
        if not self.history:
            return question

        # Create a combined context from history
        combined_context = self.get_context()

        # Get embeddings
        context_embedding = sentence_model.encode([combined_context])[0]
        question_embedding = sentence_model.encode([question])[0]

        # Calculate similarity
        similarity = cosine_similarity([context_embedding], [question_embedding])[0][0]

        # If similarity is high, it's likely a follow-up question
        if similarity > 0.5:  # This threshold can be adjusted
            return f"{combined_context} {question}"

        # Otherwise, it might be a new topic
        return question
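
    # Hypothetical illustration of the 0.5 gate: "How tall is it?" asked after
    # several turns about the Eiffel Tower typically scores well above 0.5
    # against the running context and gets that context prepended, whereas an
    # unrelated "What's the capital of Peru?" scores low and passes through
    # unchanged.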
    def rephrase_query(self, question, instructions=None):
        if not self.model:
            return question  # Return the original question if no model is available

        instruction_prompt = f"Instructions: {instructions}\n" if instructions else ""
        prompt = f"""
Given the conversation context, the current question, and any provided instructions, rephrase the question to include relevant context and turn it into a more search-engine-friendly query:
Conversation context: {self.get_context()}
Current question: {question}
{instruction_prompt}
Rephrased question:
"""
        rephrased_question = generate_chunked_response(self.model, prompt)
        return rephrased_question.strip()
    def process_question(self, question):
        core_question, instructions = self.extract_instructions(question)

        if self.is_follow_up_question(core_question):
            contextualized_question = self.get_most_relevant_context(core_question)
            contextualized_question = self.rephrase_query(contextualized_question, instructions)
        else:
            contextualized_question = core_question

        topics = self.extract_topics(contextualized_question)
        self.add_to_history(question)
        self.last_instructions = instructions

        return contextualized_question, topics, self.entity_tracker, instructions
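
# Example round trip (hypothetical input): process_question("What about its
# height, briefly") splits off the trailing instruction, detects the follow-up
# pronoun "its", folds in the running conversation context, and returns the
# rephrased query together with extracted topics and tracked entities.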
# Initialize LlamaParse
llama_parser = LlamaParse(
    api_key=llama_cloud_api_key,
    result_type="markdown",
    num_workers=4,
    verbose=True,
    language="en",
)
def load_document(file: NamedTemporaryFile, parser: str = "pypdf") -> List[Document]:
    """Loads and splits the document into pages."""
    if parser == "pypdf":
        loader = PyPDFLoader(file.name)
        return loader.load_and_split()
    elif parser == "llamaparse":
        try:
            documents = llama_parser.load_data(file.name)
            return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
        except Exception as e:
            print(f"Error using Llama Parse: {str(e)}")
            print("Falling back to PyPDF parser")
            loader = PyPDFLoader(file.name)
            return loader.load_and_split()
    else:
        raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
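
# Design note: LlamaParse needs a valid LLAMA_CLOUD_API_KEY and a network
# round trip, so any failure (missing key, quota, outage) logs the error and
# degrades to the local PyPDF path instead of aborting the upload.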
def update_vectors(files, parser):
    if not files:
        return "Please upload at least one PDF file."

    embed = get_embeddings()
    total_chunks = 0
    all_data = []

    for file in files:
        data = load_document(file, parser)
        all_data.extend(data)
        total_chunks += len(data)

    if os.path.exists("faiss_database"):
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
        database.add_documents(all_data)
    else:
        database = FAISS.from_documents(all_data, embed)

    database.save_local("faiss_database")
    return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
def get_embeddings():
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

def clear_cache():
    if os.path.exists("faiss_database"):
        # FAISS.save_local writes a directory (index.faiss + index.pkl),
        # so it must be removed with shutil.rmtree, not os.remove
        shutil.rmtree("faiss_database")
        return "Cache cleared successfully."
    else:
        return "No cache to clear."
def get_model(temperature, top_p, repetition_penalty):
    return HuggingFaceHub(
        repo_id="mistralai/Mistral-7B-Instruct-v0.3",
        model_kwargs={
            "temperature": temperature,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "max_length": 800
        },
        huggingfacehub_api_token=huggingface_token
    )
MAX_PROMPT_CHARS = 20000  # Adjust based on your model's limitations

def chunk_text(text: str, max_chunk_size: int = 800) -> List[str]:
    chunks = []
    current_chunk = ""
    # Split on sentence boundaries and pack sentences greedily into chunks
    for sentence in re.split(r'(?<=[.!?])\s+', text):
        if len(current_chunk) + len(sentence) > max_chunk_size:
            chunks.append(current_chunk.strip())
            current_chunk = sentence
        else:
            current_chunk += " " + sentence
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks
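
# Hypothetical example: chunk_text("A. B. C.", max_chunk_size=4) returns
# ["A.", "B. C."]. Note the greedy length check ignores the joining space,
# which is how "B. C." (5 characters) slips through a 4-character budget;
# with the default 800-character budget the same input stays a single chunk.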
def get_most_relevant_chunks(question: str, chunks: List[str], top_k: int = 3) -> List[str]:
    question_embedding = sentence_model.encode([question])[0]
    chunk_embeddings = sentence_model.encode(chunks)
    similarities = cosine_similarity([question_embedding], chunk_embeddings)[0]
    # argsort is ascending, so take the last top_k and reverse for most-relevant-first
    top_indices = np.argsort(similarities)[-top_k:][::-1]
    return [chunks[i] for i in top_indices]
def generate_chunked_response(model, prompt, max_tokens=800, max_chunks=5):
    full_response = ""
    for i in range(max_chunks):
        try:
            chunk = model(prompt + full_response, max_new_tokens=max_tokens)
            chunk = chunk.strip()
            # Stop once the model produces a sentence-final chunk
            if chunk.endswith((".", "!", "?")):
                full_response += chunk
                break
            full_response += chunk
        except Exception as e:
            print(f"Error in generate_chunked_response: {e}")
            break
    return full_response.strip()
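
# Design note: this is a simple continuation loop. Each pass feeds the prompt
# plus everything generated so far back to the model, so the response can grow
# beyond one max_new_tokens window; sentence-final punctuation is used as a
# crude completion signal, capped at max_chunks passes.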
def extract_text_from_webpage(html):
    soup = BeautifulSoup(html, 'html.parser')
    # Drop non-visible elements before extracting text
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = '\n'.join(chunk for chunk in chunks if chunk)
    return text
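
# Hypothetical example: given "<p>Hello</p>\n<script>alert(1)</script>\n<p>World</p>",
# the function returns "Hello\nWorld" -- the <script> body is stripped and the
# blank lines left behind are collapsed away.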
_useragent_list = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36",
]
def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_verify=None):
    start = 0
    all_results = []
    max_chars_per_page = 8000

    print(f"Starting Google search for term: '{term}'")

    with requests.Session() as session:
        while start < num_results:
            try:
                user_agent = random.choice(_useragent_list)
                headers = {
                    'User-Agent': user_agent
                }
                resp = session.get(
                    url="https://www.google.com/search",
                    headers=headers,
                    params={
                        "q": term,
                        "num": num_results - start,
                        "hl": lang,
                        "start": start,
                        "safe": safe,
                    },
                    timeout=timeout,
                    verify=ssl_verify,
                )
                resp.raise_for_status()
                print(f"Successfully retrieved search results page (start={start})")
            except requests.exceptions.RequestException as e:
                print(f"Error retrieving search results: {e}")
                break

            soup = BeautifulSoup(resp.text, "html.parser")
            result_block = soup.find_all("div", attrs={"class": "g"})
            if not result_block:
                print("No results found on this page")
                break

            print(f"Found {len(result_block)} results on this page")
            for result in result_block:
                link = result.find("a", href=True)
                if link:
                    link = link["href"]
                    print(f"Processing link: {link}")
                    try:
                        webpage = session.get(link, headers=headers, timeout=timeout)
                        webpage.raise_for_status()
                        visible_text = extract_text_from_webpage(webpage.text)
                        if len(visible_text) > max_chars_per_page:
                            visible_text = visible_text[:max_chars_per_page] + "..."
                        all_results.append({"link": link, "text": visible_text})
                        print(f"Successfully extracted text from {link}")
                    except requests.exceptions.RequestException as e:
                        print(f"Error retrieving webpage content: {e}")
                        all_results.append({"link": link, "text": None})
                else:
                    print("No link found for this result")
                    all_results.append({"link": None, "text": None})

            start += len(result_block)

    print(f"Search completed. Total results: {len(all_results)}")

    if not all_results:
        print("No search results found. Returning a default message.")
        return [{"link": None, "text": "No information found in the web search results."}]
    return all_results
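
# Caveat: this scrapes Google's HTML results page directly and keys on the
# "g" result class, which Google changes without notice. Rotating the
# User-Agent only reduces, not eliminates, the chance of being served a
# consent page or CAPTCHA instead of results.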
def estimate_tokens(text):
    # Rough heuristic: treat each whitespace-separated word as one token
    return len(text.split())

def truncate_text(text, max_tokens):
    words = text.split()
    if len(words) <= max_tokens:
        return text
    return ' '.join(words[:max_tokens])
def rerank_documents(query: str, documents: List[Document], top_k: int = 5) -> List[Document]:
    query_embedding = sentence_model.encode([query])[0]
    doc_embeddings = sentence_model.encode([doc.page_content for doc in documents])
    similarities = cosine_similarity([query_embedding], doc_embeddings)[0]
    # Sort descending by cosine similarity and keep the top_k documents
    ranked_indices = similarities.argsort()[::-1][:top_k]
    return [documents[i] for i in ranked_indices]
def prepare_context(query: str, documents: List[Document], max_tokens: int) -> str:
    reranked_docs = rerank_documents(query, documents)
    context = ""
    for doc in reranked_docs:
        doc_content = f"Source: {doc.metadata.get('source', 'Unknown')}\nContent: {doc.page_content}\n\n"
        if estimate_tokens(context + doc_content) > max_tokens:
            break
        context += doc_content
    return truncate_text(context, max_tokens)
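
# Design note: documents are packed most-relevant-first until the next one
# would exceed the token budget, so a single long but relevant document can
# crowd out several shorter ones; the final truncate_text call is a safety
# guard rather than the primary limit.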
# Initialize LlamaCppAgent
def initialize_llama_cpp_agent():
    main_model = LlamaCppEndpointSettings(
        completions_endpoint_url="http://127.0.0.1:8080/completion"
    )
    llama_cpp_agent = LlamaCppAgent(
        main_model,
        debug_output=False,
        system_prompt="You are an AI assistant designed to help with RAG tasks.",
        predefined_messages_formatter_type=MessagesFormatterType.CHATML
    )
    return llama_cpp_agent
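
# Note: this assumes a llama.cpp server is already running locally and
# exposing /completion on port 8080 (e.g. started with
# `./server -m <model.gguf> --port 8080`). If it is not, get_chat_response
# below raises and the except branches in ask_question return an apology.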
# Modify the ask_question function to use LlamaCppAgent
def ask_question(question, temperature, top_p, repetition_penalty, web_search, chatbot, user_instructions):
    if not question:
        return "Please enter a question."

    llama_cpp_agent = initialize_llama_cpp_agent()
    model = get_model(temperature, top_p, repetition_penalty)

    # Update the chatbot's model
    chatbot.model = model

    embed = get_embeddings()
    if os.path.exists("faiss_database"):
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    else:
        database = None

    max_attempts = 3
    max_input_tokens = 20000
    max_output_tokens = 800

    if web_search:
        contextualized_question, topics, entity_tracker, _ = chatbot.process_question(question)

        try:
            search_results = google_search(contextualized_question, num_results=5)
        except Exception as e:
            print(f"Error in web search: {e}")
            return f"I apologize, but I encountered an error while searching for information: {str(e)}"

        all_answers = []
        web_docs = []  # Initialized here so the sources section below never sees an undefined name

        for attempt in range(max_attempts):
            try:
                web_docs = [Document(page_content=result["text"], metadata={"source": result["link"]}) for result in search_results if result["text"]]

                if not web_docs:
                    return "I'm sorry, but I couldn't find any relevant information from the web search."

                if database is None:
                    database = FAISS.from_documents(web_docs, embed)
                else:
                    database.add_documents(web_docs)
                database.save_local("faiss_database")

                context_str = prepare_context(contextualized_question, web_docs, max_input_tokens // 2)
                instruction_prompt = f"User Instructions: {user_instructions}\n" if user_instructions else ""

                prompt_template = f"""
Answer the question based on the following web search results, conversation context, entity information, and user instructions:
Web Search Results:
{{context}}
Conversation Context: {{conv_context}}
Current Question: {{question}}
Topics: {{topics}}
Entity Information: {{entities}}
{instruction_prompt}
Provide a concise and relevant answer to the question.
"""

                current_conv_context = truncate_text(chatbot.get_context(), max_input_tokens // 4)
                current_topics = topics[:5]
                current_entities = {k: list(v)[:3] for k, v in entity_tracker.items()}

                formatted_prompt = prompt_template.format(
                    context=context_str,
                    conv_context=current_conv_context,
                    question=question,
                    topics=", ".join(current_topics),
                    entities=json.dumps(current_entities)
                )

                if estimate_tokens(formatted_prompt) > max_input_tokens:
                    formatted_prompt = truncate_text(formatted_prompt, max_input_tokens)

                try:
                    # Use LlamaCppAgent for initial response generation
                    initial_response = llama_cpp_agent.get_chat_response(formatted_prompt, temperature=temperature)

                    # Use generate_chunked_response for further refinement if needed
                    full_response = generate_chunked_response(model, initial_response, max_tokens=max_output_tokens)
                    answer = extract_answer(full_response, user_instructions)
                    all_answers.append(answer)
                    break
                except Exception as e:
                    print(f"Error in response generation: {e}")
                    if attempt == max_attempts - 1:
                        all_answers.append("I apologize, but I encountered an error while generating the response. Please try again with a simpler question.")
            except Exception as e:
                print(f"Error in ask_question (attempt {attempt + 1}): {e}")
                if attempt == max_attempts - 1:
                    all_answers.append("I apologize, but an unexpected error occurred. Please try again with a different question or check your internet connection.")

        answer = "\n\n".join(all_answers)
        sources = set(doc.metadata['source'] for doc in web_docs)
        sources_section = "\n\nSources:\n" + "\n".join(f"- {source}" for source in sources)
        answer += sources_section

        chatbot.add_to_history(answer)
        return answer
    else:  # PDF document chat
        for attempt in range(max_attempts):
            try:
                if database is None:
                    return "No documents available. Please upload PDF documents to answer questions."

                retriever = database.as_retriever(search_kwargs={"k": 5})
                relevant_docs = retriever.get_relevant_documents(question)
                context_str = prepare_context(question, relevant_docs, max_input_tokens // 2)
                instruction_prompt = f"User Instructions: {user_instructions}\n" if user_instructions else ""

                prompt_template = f"""
Answer the question based on the following context from the PDF document:
Context:
{{context}}
Question: {{question}}
{instruction_prompt}
Provide a summarized and direct answer to the question.
"""

                formatted_prompt = prompt_template.format(context=context_str, question=question)

                if estimate_tokens(formatted_prompt) > max_input_tokens:
                    formatted_prompt = truncate_text(formatted_prompt, max_input_tokens)

                try:
                    # Use LlamaCppAgent for initial response generation
                    initial_response = llama_cpp_agent.get_chat_response(formatted_prompt, temperature=temperature)

                    # Use generate_chunked_response for further refinement if needed
                    full_response = generate_chunked_response(model, initial_response, max_tokens=max_output_tokens)
                    answer = extract_answer(full_response, user_instructions)
                    return answer
                except Exception as e:
                    print(f"Error in response generation: {e}")
                    if attempt == max_attempts - 1:
                        return "I apologize, but I encountered an error while generating the response. Please try again with a simpler question."
            except Exception as e:
                print(f"Error in ask_question (attempt {attempt + 1}): {e}")
                if attempt == max_attempts - 1:
                    return "I apologize, but an unexpected error occurred. Please try again with a different question."

    return "An unexpected error occurred. Please try again later."
def extract_answer(full_response, instructions=None):
    # Strip any echoed prompt instructions so only the model's answer remains
    answer_patterns = [
        r"Provide a concise and direct answer to the question without mentioning the web search or these instructions:",
        r"Provide a concise and direct answer to the question:",
        r"Provide a concise and relevant answer to the question.",
        r"Answer:",
        r"Provide a summarized and direct answer to the question.",
        r"If the context doesn't contain relevant information, state that the information is not available in the document.",
        r"Provide a summarized and direct answer to the original question without mentioning the web search or these instructions:",
        r"Do not include any source information in your answer."
    ]

    for pattern in answer_patterns:
        match = re.split(pattern, full_response, flags=re.IGNORECASE)
        if len(match) > 1:
            full_response = match[-1].strip()
            break

    # Remove any remaining instruction-like phrases
    cleanup_patterns = [
        r"without mentioning the web search or these instructions\.",
        r"Do not include any source information in your answer\.",
        r"If the context doesn't contain relevant information, state that the information is not available in the document\."
    ]
    for pattern in cleanup_patterns:
        full_response = re.sub(pattern, "", full_response, flags=re.IGNORECASE).strip()

    # Remove the user instructions if present
    if instructions:
        instruction_pattern = rf"User Instructions:\s*{re.escape(instructions)}.*?\n"
        full_response = re.sub(instruction_pattern, "", full_response, flags=re.IGNORECASE | re.DOTALL)

    return full_response.strip()
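
# Hypothetical example:
#   extract_answer("Answer: Paris is the capital of France.")
# returns "Paris is the capital of France." -- the echoed "Answer:" prompt
# fragment is split off before the cleanup passes run.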
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Enhanced PDF Document Chat and Web Search")

    with gr.Row():
        file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
        parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="pypdf")
        update_button = gr.Button("Upload PDF")

    update_output = gr.Textbox(label="Update Status")
    update_button.click(update_vectors, inputs=[file_input, parser_dropdown], outputs=update_output)

    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(label="Conversation")
            question_input = gr.Textbox(label="Ask a question")
            instructions_input = gr.Textbox(label="Instructions for response (optional)", placeholder="Enter any specific instructions for the response here")
            submit_button = gr.Button("Submit")
        with gr.Column(scale=1):
            temperature_slider = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.5, step=0.1)
            top_p_slider = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)
            repetition_penalty_slider = gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, value=1.0, step=0.1)
            web_search_checkbox = gr.Checkbox(label="Enable Web Search", value=False)

    enhanced_context_driven_chatbot = EnhancedContextDrivenChatbot()

    # Update the chat function to use the modified ask_question function
    def chat(question, history, temperature, top_p, repetition_penalty, web_search, user_instructions):
        answer = ask_question(question, temperature, top_p, repetition_penalty, web_search, enhanced_context_driven_chatbot, user_instructions)
        history.append((question, answer))
        return "", history

    submit_button.click(chat, inputs=[question_input, chatbot, temperature_slider, top_p_slider, repetition_penalty_slider, web_search_checkbox, instructions_input], outputs=[question_input, chatbot])

    clear_button = gr.Button("Clear Cache")
    clear_output = gr.Textbox(label="Cache Status")
    clear_button.click(clear_cache, inputs=[], outputs=clear_output)

if __name__ == "__main__":
    demo.launch()