from langchain_openai import OpenAIEmbeddings
from langchain_openai import AzureOpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_community.callbacks import get_openai_callback
from typing import List, Dict, Any
import json
import os
import re
import shutil


# Load configuration from JSON file
def load_config():
    config_path = os.path.join(os.path.dirname(__file__), 'config.json')
    with open(config_path, 'r', encoding='utf-8') as f:
        return json.load(f)


# Load configuration
CONFIG = load_config()
CUBIX_DOCS = CONFIG['cubix_docs']
MOB_MAPPINGS = CONFIG['mob_mappings']
COMMAND_SPECS = CONFIG['command_specs']
SYSTEM_PROMPT_TEMPLATE = CONFIG['system_prompt_template']
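
# Expected shape of config.json, inferred from how the keys are used below;
# the concrete values are illustrative assumptions, not the real configuration:
#
# {
#   "cubix_docs": ["https://example.com/wiki/autumn-update"],
#   "mob_mappings": {"zombie": "minecraft:zombie"},
#   "command_specs": {
#     "kill_mob": {
#       "description": "Ask the player to kill a mob",
#       "required_fields": ["type", "mob"],
#       "examples": [
#         {"input": "kill a zombie",
#          "output": {"type": "kill_mob", "mob": "minecraft:zombie"}}
#       ]
#     }
#   },
#   "system_prompt_template": {
#     "intro": "...",
#     "command_rules": ["..."],
#     "validation_checklist": ["..."],
#     "examples": ["..."],
#     "error_handlers": ["..."]
#   }
# }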


class RAGSystem:
    def __init__(self, openai_api_key: str):
        self.openai_api_key = openai_api_key
        self.embeddings = OpenAIEmbeddings(
            openai_api_key=openai_api_key,
            model="text-embedding-3-large",
            dimensions=1536,  # Explicitly set dimensions for consistency
            show_progress_bar=True
        )
        self.document_store = None
        self.user_conversations = {}
        self.model = ChatOpenAI(
            openai_api_key=openai_api_key,
            model_name="gpt-4o-mini",
            temperature=0.1
        )
        self.documents_loaded = False  # Track whether documents have been loaded
        self.initialize_knowledge_base()

    def initialize_knowledge_base(self):
        """Initialize or load the knowledge base if it exists."""
        if self.documents_loaded:  # Check if documents are already loaded
            print("Documents have already been loaded.")
            return
        try:
            # First check if the index exists
            if not os.path.exists("faiss_index"):
                print("No existing knowledge base found. Creating new one...")
                self.create_new_knowledge_base()
                self.documents_loaded = True  # Set flag to True after loading
                return
            try:
                # Try to load the existing index
                self.document_store = FAISS.load_local(
                    "faiss_index",
                    self.embeddings,
                    allow_dangerous_deserialization=True
                )
                # Verify by running a test query
                self.document_store.similarity_search_with_score("test query", k=1)
                print("Loaded existing knowledge base.")
            except AssertionError:
                # Dimension mismatch detected
                print("Embedding dimensions mismatch detected. Rebuilding knowledge base...")
                self.create_new_knowledge_base(backup_old=True)
            except Exception as e:
                # Other loading errors
                print(f"Error loading knowledge base: {e}")
                self.create_new_knowledge_base(backup_old=True)
            self.documents_loaded = True  # Set flag to True after loading
        except Exception as e:
            print(f"Error during knowledge base initialization: {e}")
            self.create_new_knowledge_base()

    def create_new_knowledge_base(self, backup_old=False):
        """Create a new knowledge base from scratch."""
        # Back up the old index if needed
        if backup_old and os.path.exists("faiss_index"):
            backup_dir = "faiss_index_backup"
            shutil.move("faiss_index", backup_dir)
            print(f"Old index backed up to {backup_dir}")
        # Create the directory if needed
        os.makedirs("faiss_index", exist_ok=True)
        # Load documents from the web
        docs = []
        for url in CUBIX_DOCS:
            try:
                loader = WebBaseLoader(url)
                web_docs = loader.load()
                # Clean and preprocess documents
                for doc in web_docs:
                    # Clean HTML artifacts and normalize whitespace
                    content = doc.page_content
                    content = re.sub(r'\s+', ' ', content)  # Normalize whitespace
                    content = re.sub(r'<[^>]+>', ' ', content)  # Remove HTML tags
                    content = re.sub(r'\[.*?\]', '', content)  # Remove square-bracketed content
                    # Update document content
                    doc.page_content = content.strip()
                    # Add the source URL to metadata if not present
                    if 'source' not in doc.metadata:
                        doc.metadata['source'] = url
                docs.extend(web_docs)
                print(f"Loaded document from {url}")
            except Exception as e:
                print(f"Error loading document from {url}: {e}")
        if not docs:
            print("No documents could be loaded.")
            # Initialize an empty vector store
            self.document_store = FAISS.from_texts(["placeholder"], self.embeddings)
            return
        # Split documents
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,  # Increased from 500 for better context
            chunk_overlap=100,  # Increased overlap to prevent breaking important context
            separators=["\n\n", "\n", ". ", " ", ""],  # More intelligent splitting
            length_function=len
        )
        split_docs = text_splitter.split_documents(docs)
        # Create a new vector store from scratch
        self.document_store = FAISS.from_documents(split_docs, self.embeddings)
        try:
            # Save the new index
            self.document_store.save_local("faiss_index")
            print(f"Successfully saved new knowledge base to faiss_index directory with {len(split_docs)} chunks")
        except Exception as e:
            print(f"Error saving knowledge base: {e}")

    def add_web_documents(self, urls: List[str]):
        """Add web documents to the knowledge base."""
        if not urls:
            return False
        # Load documents from the web
        docs = []
        for url in urls:
            try:
                print(f"Loading document from {url}")
                loader = WebBaseLoader(url)
                web_docs = loader.load()
                # Clean and preprocess documents
                for doc in web_docs:
                    # Clean HTML artifacts and normalize whitespace
                    content = doc.page_content
                    content = re.sub(r'\s+', ' ', content)  # Normalize whitespace
                    content = re.sub(r'<[^>]+>', ' ', content)  # Remove HTML tags
                    content = re.sub(r'\[.*?\]', '', content)  # Remove square-bracketed content
                    # Update document content
                    doc.page_content = content.strip()
                    # Add the source URL to metadata if not present
                    if 'source' not in doc.metadata:
                        doc.metadata['source'] = url
                docs.extend(web_docs)
                print(f"Successfully loaded document from {url}")
            except Exception as e:
                print(f"Error loading document from {url}: {e}")
        if not docs:
            print("No documents could be loaded.")
            return False
        # Split documents
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,  # Increased from 150 for better context
            chunk_overlap=50,  # Overlap to prevent breaking important context
            separators=["\n\n", "\n", " ", ""],  # More intelligent splitting
            length_function=len
        )
        split_docs = text_splitter.split_documents(docs)
        # Create or update the vector store
        if self.document_store is None:
            self.document_store = FAISS.from_documents(split_docs, self.embeddings)
        else:
            # Add documents to the existing store
            self.document_store.add_documents(split_docs)
        try:
            # Save the updated index
            self.document_store.save_local("faiss_index")
            print(f"Added {len(split_docs)} document chunks to the knowledge base")
            return True
        except Exception as e:
            print(f"Error saving index: {e}")
            return False

    def _filter_response_fields(self, response_dict: dict) -> dict:
        """Filter out fields that are not in required_fields for the command type."""
        if not isinstance(response_dict, dict) or 'type' not in response_dict:
            return response_dict
        command_type = response_dict['type']
        if command_type not in COMMAND_SPECS:
            return response_dict
        required_fields = COMMAND_SPECS[command_type]['required_fields']
        return {k: v for k, v in response_dict.items() if k in required_fields}
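
    # Illustrative example (the field names here are assumptions, not taken from config.json):
    # if COMMAND_SPECS['kill_mob']['required_fields'] were ['type', 'mob', 'count'], then
    # _filter_response_fields({'type': 'kill_mob', 'mob': 'minecraft:zombie', 'count': 3, 'note': 'x'})
    # would drop 'note' and keep only the 'type', 'mob' and 'count' keys.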

    def generate_response(self, user_id: str, message: str) -> Dict[str, Any]:
        """Generate a response for a user message."""
        if user_id not in self.user_conversations:
            self.user_conversations[user_id] = []
        # Clean and normalize the input message
        message = message.strip()
        # Add the new message using a proper LangChain message object
        self.user_conversations[user_id].append(HumanMessage(content=message))
        # Extract relevant context with improved search parameters
        relevant_context = ""
        context_docs = []
        if self.document_store:
            try:
                # Enhanced retrieval strategy: hybrid search approach.
                # First try with a higher relevance score threshold for more accurate results.
                results = self.document_store.similarity_search_with_score(
                    message,
                    k=5,  # Increased from 4 for better coverage
                    score_threshold=0.6  # Relaxed from 0.75 for better recall with Russian queries
                )
                if results:
                    # Sort by score and take the top results
                    results.sort(key=lambda x: x[1])
                    # Format with scores and metadata for better context
                    formatted_results = []
                    for doc, score in results:
                        if score < 1.5:  # Only include sufficiently relevant results (lower score is better in FAISS)
                            # Extract the source for better attribution
                            source = doc.metadata.get('source', 'Unknown source')
                            if isinstance(source, str) and source.startswith('http'):
                                source = source.split('/')[-1] if '/' in source else source
                            # Trim content to keep the key information while reducing token usage
                            content = doc.page_content
                            # Remove redundant whitespace and normalize
                            content = re.sub(r'\s+', ' ', content).strip()
                            # Cap content length to limit token usage
                            max_length = 1000
                            if len(content) > max_length:
                                content = content[:max_length] + "..."
                            context = {
                                'content': content,
                                'score': score,
                                'source': source,
                                'metadata': doc.metadata if hasattr(doc, 'metadata') else {}
                            }
                            context_docs.append(context)
                            formatted_results.append(f"SOURCE: {source}\nRELEVANCE: {score:.2f}\n{content}")
                    if formatted_results:
                        relevant_context = "\n\n---\n\n".join(formatted_results)
                # If no high-relevance results were found, try a broader search
                if not relevant_context:
                    results = self.document_store.similarity_search(
                        message,
                        k=4  # Increased from 3 for better coverage
                    )
                    if results:
                        formatted_results = []
                        for doc in results:
                            # Extract the source for better attribution
                            source = doc.metadata.get('source', 'Unknown source')
                            if isinstance(source, str) and source.startswith('http'):
                                source = source.split('/')[-1] if '/' in source else source
                            # Truncate content if too long (token optimization)
                            content = doc.page_content
                            if len(content) > 1000:  # Increased from 800 for better context
                                content = content[:1000] + "..."
                            formatted_results.append(f"SOURCE: {source}\n{content}")
                        relevant_context = "\n\n---\n\n".join(formatted_results)
            except Exception as e:
                print(f"Error during context retrieval: {e}")
                relevant_context = f"Error retrieving context: {str(e)}"
        # Build command specifications for the system prompt
        command_specs_text = "# Available Commands and Required Fields\n\n"
        for cmd_name, cmd_spec in COMMAND_SPECS.items():
            command_specs_text += f"## {cmd_name}\n"
            command_specs_text += f"Description: {cmd_spec['description']}\n"
            command_specs_text += "Required fields:\n"
            for field in cmd_spec['required_fields']:
                command_specs_text += f"- {field}\n"
            command_specs_text += "Examples:\n"
            for example in cmd_spec['examples']:
                command_specs_text += f"Query: \"{example['input']}\"\n"
                command_specs_text += f"Response: {json.dumps(example['output'], ensure_ascii=False)}\n\n"
        # Add mob mapping information
        mob_mappings_text = "# Entity IDs for Minecraft Mobs\n\nWhen processing kill_mob commands, use these entity IDs in the 'mob' field:\n\n"
        for mob_name, entity_id in MOB_MAPPINGS.items():
            mob_mappings_text += f"- {mob_name}: {entity_id}\n"
        mob_mappings_text += "\nWhen a player mentions a mob, always map it to the corresponding entity ID in your response.\n\n"
        # Build the system prompt with the new structure
        system_prompt = ""
        # Add the intro
        if 'intro' in SYSTEM_PROMPT_TEMPLATE:
            system_prompt += SYSTEM_PROMPT_TEMPLATE['intro'] + "\n\n"
        # Add all sections from the template in order
        sections = [
            'command_rules',
            'validation_checklist',
            'examples',
            'error_handlers'
        ]
        for section in sections:
            if section in SYSTEM_PROMPT_TEMPLATE:
                for item in SYSTEM_PROMPT_TEMPLATE[section]:
                    system_prompt += item + "\n"
                system_prompt += "\n"
        # Add command specs and mob mappings from the generated text
        system_prompt += command_specs_text + "\n"
        system_prompt += mob_mappings_text + "\n"
        # Add relevant context if available
        if relevant_context:
            system_prompt += f"""
## Relevant Information from CubixWorld Documentation
Use this information to guide your response. If the user is asking about game updates, features, or information that appears in this documentation, provide that information in your response:
{relevant_context}
IMPORTANT: If the user is asking about information contained in the documentation above, you SHOULD provide that information in your response. Do not say you cannot answer questions about game updates or features if the information is available in the documentation.
"""
        # Create the messages list with proper LangChain message objects
        messages = [SystemMessage(content=system_prompt)]
        # Get the last 5 conversation messages (reduced from 10 for more focused context)
        history = self.user_conversations[user_id][-5:] if len(self.user_conversations[user_id]) > 0 else []
        # Add conversation history to the messages
        messages.extend(history)
        # Add a context summary to help the model
        if context_docs:
            # Enhanced context summary with confidence signals
            context_summary = "\nContext relevance summary (sorted by relevance):\n"
            # Sort context by relevance score
            sorted_contexts = sorted(context_docs, key=lambda x: x['score'])
            for i, ctx in enumerate(sorted_contexts):
                # Classify confidence (lower score is better in FAISS)
                confidence = "High" if ctx['score'] < 0.3 else "Medium" if ctx['score'] < 0.6 else "Moderate"
                # Add a brief preview of the content with its source
                source = ctx['source'].split('/')[-1] if '/' in ctx['source'] else ctx['source']
                preview = ctx['content'][:100].replace('\n', ' ') + "..."
                context_summary += f"{i+1}. [{confidence} confidence, score {ctx['score']:.2f}] From {source}: {preview}\n"
            messages.append(SystemMessage(content=context_summary))
        # Add a special instruction for information questions
        if any(keyword in message.lower() for keyword in ["что", "какие", "когда", "обновление", "новое", "осеннее", "autumn"]):
            info_instruction = """
IMPORTANT INSTRUCTION: The user is asking about game information or updates. If you have relevant information in the context provided,
you MUST share that information in your response using the message command type. Do not refuse to answer questions about game updates
or features if the information is available in the context.
"""
            messages.append(SystemMessage(content=info_instruction))
        # Initialize token usage variables
        total_tokens = 0
        prompt_tokens = 0
        completion_tokens = 0
        total_cost = 0.0
        # Get the response from the model with token tracking
        with get_openai_callback() as cb:
            response = self.model.predict_messages(messages)
            response_content = response.content
            # Store token usage metrics
            total_tokens = cb.total_tokens
            prompt_tokens = cb.prompt_tokens
            completion_tokens = cb.completion_tokens
            total_cost = cb.total_cost
        try:
            # Simple JSON extraction from a possible fenced code block
            if "```json" in response_content:
                json_start = response_content.find("```json") + 7
                json_end = response_content.find("```", json_start)
                response_content = response_content[json_start:json_end].strip()
            elif "```" in response_content:
                json_start = response_content.find("```") + 3
                json_end = response_content.find("```", json_start)
                response_content = response_content[json_start:json_end].strip()
            parsed_response = json.loads(response_content)
            filtered_response = self._filter_response_fields(parsed_response)
            json_response = {'response': filtered_response}
            # Add token usage information to the response
            json_response["token_usage"] = {
                "total_tokens": total_tokens,
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_cost_usd": total_cost
            }
            # Store the bot's response in the conversation history as a proper AIMessage
            self.user_conversations[user_id].append(AIMessage(content=json.dumps(json_response)))
            return json_response
        except json.JSONDecodeError:
            # Simple fallback if JSON parsing fails
            fallback_response = {
                'type': 'message',
                'message': response_content
            }
            self.user_conversations[user_id].append(AIMessage(content=json.dumps(fallback_response)))
            return fallback_response
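

# Minimal usage sketch (an illustration, not part of the original module; assumes the
# OPENAI_API_KEY environment variable is set and config.json sits next to this file):
if __name__ == "__main__":
    rag = RAGSystem(openai_api_key=os.environ["OPENAI_API_KEY"])
    result = rag.generate_response(user_id="demo_user", message="kill 5 zombies")
    print(json.dumps(result, ensure_ascii=False, indent=2))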