import streamlit as st
import logging
from .semantic_process import process_semantic_analysis
from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
from ..utils.widget_utils import generate_unique_key
from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content
logger = logging.getLogger(__name__)
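# Set up the floating-panel helpers (semantic_float_init is assumed to inject the CSS/JS they need)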
semantic_float_init()
def get_translation(t, key, default):
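    """Return t[key] if the translation exists, otherwise the given default label."""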
return t.get(key, default)
def display_semantic_interface(lang_code, nlp_models, t):
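    """Render the semantic analysis page: file upload/management, document
    analysis with a floating result panel, and a chat assistant.

    lang_code selects the model from nlp_models; t is the translation dict
    queried through get_translation().
    """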
    # Initialize the chatbot and the chat history
if 'semantic_chatbot' not in st.session_state:
st.session_state.semantic_chatbot = initialize_chatbot('semantic')
if 'semantic_chat_history' not in st.session_state:
st.session_state.semantic_chat_history = []
    # Initialize the floating graph state if it does not exist yet
if 'graph_visible' not in st.session_state:
st.session_state.graph_visible = False
if 'graph_content' not in st.session_state:
st.session_state.graph_content = ""
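    # Inline CSS for the chat message bubbles, the floating panel and the pinned chat input bar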
st.markdown("""
<style>
.chat-message {
margin-bottom: 10px;
padding: 5px;
border-radius: 5px;
}
.user-message {
background-color: #e6f3ff;
text-align: right;
}
.assistant-message {
background-color: #f0f0f0;
text-align: left;
}
.semantic-float {
position: fixed;
right: 20px;
top: 50%;
transform: translateY(-50%);
width: 800px;
height: 600px;
z-index: 1000;
background-color: white;
border: 1px solid #ddd;
border-radius: 5px;
padding: 10px;
overflow: hidden;
box-shadow: 0 0 10px rgba(0,0,0,0.1);
}
.semantic-float img {
width: 100%;
height: auto;
max-height: 440px;
object-fit: contain;
}
.chat-input {
position: fixed;
bottom: 20px;
left: 20px;
right: 20px;
z-index: 1000;
}
</style>
""", unsafe_allow_html=True)
st.markdown(f"<div class='semantic-initial-message'>{t['semantic_initial_message']}</div>", unsafe_allow_html=True)
tab1, tab2 = st.tabs(["Upload", "Analyze"])
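    # Tab 1: file upload and management. Tab 2: analysis of a saved file plus the chat assistant.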
with tab1:
st.subheader("File Management")
uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader'))
        if uploaded_file is not None:
            try:
                # Binary formats (pdf, docx, doc, odt) are not plain UTF-8 text;
                # report a failed decode instead of crashing the page.
                file_contents = uploaded_file.getvalue().decode('utf-8')
                if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents):
                    st.success(f"File {uploaded_file.name} uploaded and saved successfully")
                else:
                    st.error("Error uploading file")
            except UnicodeDecodeError:
                st.error(f"Could not read {uploaded_file.name} as UTF-8 text")
st.markdown("---")
st.subheader("Manage Uploaded Files")
user_files = get_user_files(st.session_state.username, 'semantic')
if user_files:
for file in user_files:
col1, col2 = st.columns([3, 1])
with col1:
st.write(file['file_name'])
with col2:
if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"):
if delete_file(st.session_state.username, file['file_name'], 'semantic'):
st.success(f"File {file['file_name']} deleted successfully")
st.rerun()
else:
st.error(f"Error deleting file {file['file_name']}")
else:
st.info("No files uploaded yet.")
with tab2:
st.subheader("Semantic Analysis")
st.subheader("File Selection and Analysis")
user_files = get_user_files(st.session_state.username, 'semantic')
file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files]
selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector'))
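        # Run the semantic analysis on the selected saved file when the button is pressed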
if st.button("Analyze Document"):
if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'):
file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic')
if file_contents:
with st.spinner("Analyzing..."):
try:
nlp_model = nlp_models[lang_code]
concept_graph_base64, entity_graph_base64, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code)
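                            # Note: the returned graphs and key concepts are not rendered below; the floating panel shows the video content set up next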
st.session_state.current_file_contents = file_contents
st.success("Analysis completed successfully")
                            # Replace the floating element's content with an embedded YouTube video
                            youtube_video_id = "dQw4w9WgXcQ"  # Replace this with the ID of the video you want to show
video_content = f"""
<iframe width="100%" height="100%" src="https://www.youtube.com/embed/{youtube_video_id}" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
"""
st.session_state.graph_id = float_graph(video_content, width="800px", height="600px", position="center-right")
st.session_state.graph_visible = True
st.session_state.graph_content = video_content
                            # Debug output
st.write(f"Debug: Graph ID: {st.session_state.get('graph_id')}")
st.write(f"Debug: Graph visible: {st.session_state.get('graph_visible')}")
except Exception as e:
logger.error(f"Error during analysis: {str(e)}")
st.error(f"Error during analysis: {str(e)}")
else:
st.error("Error loading file contents")
else:
st.error("Please select a file to analyze")
st.subheader("Chat with AI")
        # Display the chat history
for message in st.session_state.semantic_chat_history:
message_class = "user-message" if message["role"] == "user" else "assistant-message"
st.markdown(f'<div class="chat-message {message_class}">{message["content"]}</div>', unsafe_allow_html=True)
        # Place the user input and the buttons at the bottom
st.markdown('<div class="chat-input">', unsafe_allow_html=True)
user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input'))
col1, col2, col3 = st.columns([3, 1, 1])
with col1:
send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message'))
with col2:
clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat'))
with col3:
if 'graph_id' in st.session_state:
toggle_button = st.button("Toggle Graph", key="toggle_graph")
if toggle_button:
st.session_state.graph_visible = not st.session_state.get('graph_visible', True)
toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible)
st.markdown('</div>', unsafe_allow_html=True)
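        # Handle a new user message: the /analyze_current command goes to the semantic processor, everything else to the chatbot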
if send_button and user_input:
st.session_state.semantic_chat_history.append({"role": "user", "content": user_input})
if user_input.startswith('/analyze_current'):
response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', ''))
else:
response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', ''))
st.session_state.semantic_chat_history.append({"role": "assistant", "content": response})
st.rerun()
if clear_button:
st.session_state.semantic_chat_history = []
st.rerun()
    # Make sure the floating graph stays visible after interactions
if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False):
toggle_float_visibility(st.session_state.graph_id, True)
    # Render the floating graph if it is visible
if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state:
st.markdown(
f"""
<div class="semantic-float">
{st.session_state.graph_content}
</div>
""",
unsafe_allow_html=True
        )