# ui/chatbot_tab.py
import logging
import os
import tempfile
from pathlib import Path

import gradio as gr

from app.functions.database_handling import list_databases
from app.configs.prompts import SYSTEM_PROMPTS
from app.llm_handling import answer_question, LLMType, generate_chat_audio
from utils.helpers import extract_text_from_files

logging.basicConfig(level=logging.INFO)


def create_chatbot_tab():
    """Build the 'Chatbot' tab of the Gradio interface."""

    def chat_upload_and_respond(files, chat_history, db_name):
        """Handle uploaded files and append their extracted text to the chat."""
        if chat_history is None:
            chat_history = []
        text = extract_text_from_files(files)
        chat_history.append({
            "role": "assistant",
            "content": f"📄 Contenuto dei documenti caricati:\n{text}"
        })
        return chat_history

    def respond(message, chat_history, db_name, prompt_type, llm_type):
        """Generate an answer to the user's question and update the chat."""
        if chat_history is None:
            chat_history = []
        # Convert the dropdown string into the LLMType enum
        selected_llm = LLMType.LOCAL if llm_type == "local" else LLMType.OPENAI
        # Get the answer (with audio)
        messages = answer_question(
            message,
            db_name,
            prompt_type,
            chat_history=chat_history,  # pass the conversation history
            llm_type=selected_llm
        )
        chat_history.extend(messages)
        return "", chat_history

    def clear_chat():
        """Clear the chat history."""
        return [], []

    def format_conversation_for_download(chat_history):
        """Format the chat history as plain text for download."""
        if not chat_history:
            return "Nessuna conversazione da scaricare"
        formatted_text = []
        for msg in chat_history:
            role = "User" if msg["role"] == "user" else "Assistant"
            content = msg["content"]
            formatted_text.append(f"{role}: {content}\n")
        return "\n".join(formatted_text)

    def download_conversation(chat_history):
        """Prepare the text file for download."""
        conversation_text = format_conversation_for_download(chat_history)
        # Write the conversation to a temporary file, making sure it is UTF-8 encoded
        temp_dir = tempfile.gettempdir()
        temp_path = os.path.join(temp_dir, "conversazione.txt")
        with open(temp_path, "w", encoding="utf-8") as f:
            f.write(conversation_text)
        return str(Path(temp_path).absolute())

    def download_audio(chat_history):
        """Return the audio file of the latest assistant message in the chat."""
        try:
            if not chat_history:
                gr.Warning("Nessun messaggio nella chat")
                return None
            # Take the most recent assistant message that carries audio
            for msg in reversed(chat_history):
                if msg["role"] == "assistant" and "audio" in msg:
                    audio_path = msg["audio"]
                    if audio_path and os.path.exists(audio_path):
                        return audio_path
            gr.Warning("Nessun audio disponibile per l'ultima risposta")
            return None
        except Exception as e:
            gr.Error(f"Errore durante il download dell'audio: {str(e)}")
            return None

    def format_conversation_for_audio(chat_history):
        """Format the conversation for text-to-speech synthesis."""
        audio_text = []
        for msg in chat_history:
            role = "Utente" if msg["role"] == "user" else "Assistente"
            audio_text.append(f"{role} dice: {msg['content']}")
        return "\n".join(audio_text)

    def generate_conversation_audio(chat_history):
        """Generate audio for the full conversation."""
        try:
            if not chat_history:
                gr.Warning("Nessun messaggio nella chat")
                return None
            conversation_text = format_conversation_for_audio(chat_history)
            audio_path = generate_speech(conversation_text, is_user=False)
            if audio_path and os.path.exists(audio_path):
                return audio_path
            else:
                gr.Warning("Errore nella generazione dell'audio")
                return None
        except Exception as e:
            gr.Error(f"Errore: {str(e)}")
            return None

    def convert_chat_to_audio(chat_history):
        if not chat_history:
            gr.Warning("Nessun messaggio da convertire")
            return None
        audio_path = generate_chat_audio(chat_history)
        if audio_path:
            return audio_path
        else:
            gr.Warning("Errore nella generazione dell'audio")
            return None

    # Get the up-to-date list of databases
    databases = list_databases()

    with gr.Tab("Chatbot"):
        # First row: selector dropdowns
        with gr.Row():
            with gr.Column(scale=1):
                db_name_chat = gr.Dropdown(
                    choices=databases,
                    label="Seleziona Database",
                    value="default_db"
                )
            with gr.Column(scale=1):
                prompt_selector = gr.Dropdown(
                    choices=list(SYSTEM_PROMPTS.keys()),
                    label="Seleziona Stile Risposta",
                    value="tutor"
                )
            with gr.Column(scale=1):
                llm_selector = gr.Dropdown(
                    choices=["openai", "local"],
                    label="Seleziona Modello",
                    value="openai"
                )

        # Chatbot and question input
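        # type="messages" means the chat history is a list of {"role", "content"}
        # dicts, the same shape produced by the handler functions above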
        chatbot = gr.Chatbot(label="Conversazione", type="messages")
        question_input = gr.Textbox(
            label="Fai una domanda",
            placeholder="Scrivi qui la tua domanda...",
            lines=2
        )

        # Action buttons
        with gr.Row():
            ask_button = gr.Button("Invia")
            clear_button = gr.Button("Pulisci Chat")

        # File upload, shown at a reduced size
        with gr.Row():
            file_input = gr.File(
                label="Carica PDF/Docx/TXT per la conversazione",
                file_types=[".pdf", ".docx", ".txt"],
                file_count="multiple",
                height="100px",
                scale=3
            )
            upload_button = gr.Button("Carica Documenti", scale=1)

        # Chat state shared with the handler functions
        chat_state = gr.State([])

        # Download and audio controls on two separate rows
        with gr.Row():
            with gr.Column(scale=1):
                download_button = gr.Button("💾 Scarica Conversazione")
                download_file = gr.File(
                    label="Download Conversazione",
                    visible=True,
                    interactive=False
                )

        with gr.Row():
            with gr.Column(scale=1):
                audio_button = gr.Button("🎤 Genera Audio Chat")
                audio_output = gr.Audio(label="Audio", visible=True)

        audio_button.click(
            fn=convert_chat_to_audio,
            inputs=[chatbot],
            outputs=[audio_output]
        )

        # Event wiring for the buttons
        upload_button.click(
            fn=chat_upload_and_respond,
            inputs=[file_input, chat_state, db_name_chat],
            outputs=chatbot
        )

        ask_button.click(
            fn=respond,
            inputs=[question_input, chat_state, db_name_chat, prompt_selector, llm_selector],  # include the model selector
            outputs=[question_input, chatbot]
        )

        clear_button.click(
            fn=clear_chat,
            outputs=[chatbot, chat_state]
        )

        # Download event
        download_button.click(
            fn=download_conversation,
            inputs=[chatbot],
            outputs=[download_file]
        )

    # Return a reference to the database dropdown so the caller can refresh it
    return {"db_selector": db_name_chat}