# llm_handling.py
import json
import logging
import os

import requests
from langchain_community.vectorstores import FAISS
from tenacity import retry, stop_after_attempt, wait_exponential

from app.config import BASE_DB_PATH, LLM_CONFIGS, LLMType
from app.configs.prompts import SYSTEM_PROMPTS
from app.utils.embedding_utils import get_embeddings

logging.basicConfig(level=logging.INFO)

# =====================================
# Functions related to LLM
# =====================================
def get_llm_client(llm_type: LLMType):
    """Returns the configured client instance and model name for the given LLM type."""
    config = LLM_CONFIGS.get(llm_type)
    if not config:
        raise ValueError(f"Model {llm_type} not supported")
    client_class = config["client"]
    model = config["model"]
    client = client_class()  # client classes are expected to take no constructor arguments
    return client, model

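# Usage sketch: LLM_CONFIGS is assumed to map each LLMType member to
# {"client": <no-arg client class>, "model": <model name string>}, which is
# all get_llm_client relies on; LLMType.OPENAI below is illustrative only.
#   client, model = get_llm_client(LLMType.OPENAI)
#   response = client.chat.completions.create(model=model, messages=[...])
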
def get_system_prompt(prompt_type="tutor"):
    """Selects the appropriate system prompt, falling back to 'tutor'."""
    return SYSTEM_PROMPTS.get(prompt_type, SYSTEM_PROMPTS["tutor"])

def test_local_connection():
    """Checks connection to the local LLM server."""
    try:
        response = requests.get("http://192.168.43.199:1234/v1/health", timeout=5)
        return response.status_code == 200
    except requests.RequestException:
        return False

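# The tenacity helpers imported above are not used elsewhere in this module;
# the sketch below shows one way they could harden the health check against
# a flaky server. The retry policy (3 attempts, exponential backoff) is an
# assumption, not part of the original design.
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10))
def ping_local_server(url="http://192.168.43.199:1234/v1/health"):
    """Like test_local_connection, but retries and raises if all attempts fail."""
    response = requests.get(url, timeout=5)
    response.raise_for_status()
    return True
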
def read_metadata(db_path):
    """Loads metadata.json from the given index directory, or [] if absent."""
    metadata_file = os.path.join(db_path, "metadata.json")
    if os.path.exists(metadata_file):
        with open(metadata_file, 'r', encoding='utf-8') as f:
            return json.load(f)
    return []

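# Expected metadata.json shape, inferred from the lookups in answer_question
# below (the authoritative schema lives wherever the index is built):
#   [{"filename": "doc.pdf", "title": "...", "author": "..."}, ...]
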
def get_relevant_documents(vectorstore, question, min_similarity=0.7):
    """Retrieves relevant documents from the vectorstore."""
    try:
        enhanced_query = enhance_query(question)
        # Raw FAISS scores are L2 distances (lower = more similar), so a
        # ">= threshold" filter needs the relevance-score variant, which
        # normalizes scores to [0, 1] with higher meaning more relevant.
        docs_and_scores = vectorstore.similarity_search_with_relevance_scores(
            enhanced_query,
            k=8
        )
        filtered_docs = [
            doc for doc, score in docs_and_scores if score >= min_similarity
        ]
        logging.info(f"Query: {question}")
        logging.info(f"Documents found: {len(filtered_docs)}")
        return filtered_docs[:5]
    except Exception as e:
        logging.error(f"Error retrieving documents: {e}")
        return []

def enhance_query(question):
    """Strips common Italian articles from the query before searching."""
    stop_words = {'il', 'lo', 'la', 'i', 'gli', 'le', 'un', 'uno', 'una'}
    words = [w for w in question.lower().split() if w not in stop_words]
    return " ".join(words)

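# For example, with the Italian articles above removed:
#   enhance_query("Qual è il tema del corso?")  ->  "qual è tema del corso?"
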
def log_search_results(question, docs_and_scores):
    """Logs each retrieved document's score and a short content preview."""
    logging.info(f"Query: {question}")
    for idx, (doc, score) in enumerate(docs_and_scores, 1):
        logging.info(f"Doc {idx} - Score: {score:.4f}")
        logging.info(f"Content: {doc.page_content[:100]}...")

def answer_question(question, db_name, prompt_type="tutor", chat_history=None, llm_type=None):
    """Answers a question with RAG over the named FAISS index plus the selected LLM."""
    if chat_history is None:
        chat_history = []
    try:
        embeddings = get_embeddings()
        db_path = os.path.join(BASE_DB_PATH, f"faiss_index_{db_name}")
        metadata_list = read_metadata(db_path)
        metadata_dict = {m["filename"]: m for m in metadata_list}
        vectorstore = FAISS.load_local(db_path, embeddings, allow_dangerous_deserialization=True)
        relevant_docs = get_relevant_documents(vectorstore, question)
        if not relevant_docs:
            return [
                {"role": "user", "content": question},
                {"role": "assistant", "content": "Sorry, no relevant information found to answer your question. Try rephrasing or asking a different question."}
            ]
        # Build source citations from per-file metadata, when available.
        sources = []
        for idx, doc in enumerate(relevant_docs, 1):
            source_file = doc.metadata.get("source", "Unknown")
            if source_file in metadata_dict:
                meta = metadata_dict[source_file]
                sources.append(f"📚 {meta['title']} (Author: {meta['author']}) - Part {idx} of {len(relevant_docs)}")
        context = "\n".join([
            f"[Part {idx} of {len(relevant_docs)}]\n{doc.page_content}"
            for idx, doc in enumerate(relevant_docs, 1)
        ])
        sources_text = (
            "\n\nSources consulted:\n" + "\n".join(sorted(set(sources)))
            if sources else ""
        )
        prompt = get_system_prompt(prompt_type).format(context=context)
        prompt += "\nAlways cite the sources used for your response, including the document title and author."
        messages = [
            {"role": "system", "content": prompt},
            *[{"role": m["role"], "content": m["content"]} for m in chat_history],
            {"role": "user", "content": question}
        ]
        client, model = get_llm_client(llm_type)
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=0.7,
            max_tokens=2048
        )
        answer = response.choices[0].message.content + sources_text
        return [
            {"role": "user", "content": question},
            {"role": "assistant", "content": answer}
        ]
    except Exception as e:
        logging.error(f"Error generating response: {e}")
        error_msg = "Local LLM not available. Try again later or use OpenAI." if "local" in str(llm_type) else str(e)
        return [
            {"role": "user", "content": question},
            {"role": "assistant", "content": f"⚠️ {error_msg}"}
        ]

class DocumentRetriever:
    def __init__(self, db_path):
        self.embeddings = get_embeddings()
        self.vectorstore = FAISS.load_local(
            db_path,
            self.embeddings,
            allow_dangerous_deserialization=True
        )

    def get_relevant_chunks(self, question):
        enhanced_query = enhance_query(question)
        docs_and_scores = self.vectorstore.similarity_search_with_score(
            enhanced_query,
            k=8
        )
        log_search_results(question, docs_and_scores)
        # similarity_search_with_score returns raw L2 distances; return the
        # documents and leave any distance cutoff to the caller.
        return [doc for doc, _ in docs_and_scores]

if __name__ == "__main__":
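    # Minimal smoke test, a sketch only: "test" is a hypothetical database
    # name and next(iter(LLM_CONFIGS)) simply picks whichever model is
    # configured first; neither is mandated by the module. The guard makes
    # this a no-op when no such index exists.
    demo_db = "test"
    if os.path.exists(os.path.join(BASE_DB_PATH, f"faiss_index_{demo_db}")):
        history = answer_question(
            "What does this course cover?",
            demo_db,
            llm_type=next(iter(LLM_CONFIGS)),
        )
        print(history[-1]["content"])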