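# RAG chat helpers: answer questions against per-database FAISS vector indexes
# with OpenAI chat completions, plus database deletion for the Gradio UI.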
import logging
from openai import OpenAI
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from app.config import OPENAI_API_KEY
import gradio as gr
import os
import shutil

logging.basicConfig(level=logging.INFO)

def answer_question(question, db_name, chat_history=None):
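    """Answer a question with RAG: pull context from the FAISS index for
    db_name and query an OpenAI chat model; returns a user/assistant
    message pair in {"role", "content"} format."""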
    if chat_history is None:
        chat_history = []

    logging.info(f"Inizio elaborazione domanda: {question} per database: {db_name}")
    
    try:
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        db_path = f"faiss_index_{db_name}"
        
        if not os.path.exists(db_path):
            return [{"role": "user", "content": question},
                   {"role": "assistant", "content": "Database non trovato"}]
        
        vectorstore = FAISS.load_local(db_path, embeddings, allow_dangerous_deserialization=True)
        relevant_docs = vectorstore.similarity_search(question, k=3)
        
        # Build the context from the retrieved documents
        context = "\n".join([doc.page_content for doc in relevant_docs])
        
        client = OpenAI(api_key=OPENAI_API_KEY)
        
        messages = [
            {"role": "system", "content": f"Usa questo contesto per rispondere: {context}"},
            {"role": "user", "content": question}
        ]
        
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",  # Cambiato da gpt-4o-mini a un modello supportato
            messages=messages,
            temperature=0,
            max_tokens=2048
        )
        
        answer = response.choices[0].message.content
        
        return [
            {"role": "user", "content": question},
            {"role": "assistant", "content": answer}
        ]

    except Exception as e:
        logging.error(f"Errore durante la generazione della risposta: {e}")
        return [
            {"role": "user", "content": question},
            {"role": "assistant", "content": f"Si è verificato un errore: {str(e)}"}
        ]
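
# Illustrative wiring sketch (an assumption, not code from this project):
# answer_question returns messages in the {"role", "content"} format that
# gr.Chatbot(type="messages") expects in recent Gradio versions, so it can
# be hooked up roughly like this:
#
#   with gr.Blocks() as rag_chatbot:
#       chat = gr.Chatbot(type="messages")
#       db = gr.Dropdown(choices=list_databases(), label="Database")
#       box = gr.Textbox(label="Question")
#       box.submit(answer_question, inputs=[box, db], outputs=chat)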

# In document_handling.py, update delete_database to also return the dropdown update
def delete_database(db_name):
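    """Remove the FAISS index directory for db_name and return a status
    message plus an updated choice list for the database dropdown."""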
    db_path = f"faiss_index_{db_name}"
    if not os.path.exists(db_path):
        return f"Il database {db_name} non esiste.", gr.Dropdown.update(choices=list_databases())
    try:
        shutil.rmtree(db_path)
        logging.info(f"Database {db_name} eliminato con successo.")
        return f"Database {db_name} eliminato con successo.", gr.Dropdown.update(choices=list_databases())
    except OSError as e:
        logging.error(f"Impossibile eliminare il database {db_name}: {e}")
        return f"Impossibile eliminare il database {db_name}: {e}", gr.Dropdown.update(choices=list_databases())

# The call to ensure_default_db() was missing here; ensure_default_db and the
# rag_chatbot Gradio app are assumed to be defined or imported elsewhere.
if __name__ == "__main__":
    ensure_default_db()  # make sure the default database exists before launching
    rag_chatbot.launch(share=True)