import streamlit as st
import streamlit_float
import streamlit_option_menu
import streamlit_antd_components
import streamlit.components.v1 as components
import streamlit.components.v1 as stc  # NOTE(review): duplicate of `components` above under a second alias
import logging
from .semantic_process import *
from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
from ..utils.widget_utils import generate_unique_key
from .semantic_float68ok import *

# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)

# One-time initialisation of the floating-panel helpers (imported via
# `from .semantic_float68ok import *`) — runs as a module-import side effect.
semantic_float_init()


def get_translation(t, key, default):
    # Look up `key` in the translation mapping `t`; fall back to `default`
    # when the key is absent (thin wrapper over dict.get).
    return t.get(key, default)


def display_semantic_interface(lang_code, nlp_models, t):
    # Render the semantic-analysis Streamlit page: an "Upload" tab for file
    # management and an "Analyze" tab that runs semantic analysis on a saved file.
    # `lang_code` selects the spaCy-style model from `nlp_models`; `t` is the
    # translation mapping consumed via get_translation().
    #
    # Chatbot and chat-history initialisation — kept in st.session_state so the
    # values survive Streamlit reruns.
    if 'semantic_chatbot' not in st.session_state:
        st.session_state.semantic_chatbot = initialize_chatbot('semantic')
    if 'semantic_chat_history' not in st.session_state:
        st.session_state.semantic_chat_history = []
    # Initialise the floating-graph state if it does not exist yet.
    if 'graph_visible' not in st.session_state:
        st.session_state.graph_visible = False
    if 'graph_content' not in st.session_state:
        st.session_state.graph_content = ""
    # NOTE(review): the markup originally embedded in the two markdown calls
    # below appears to have been stripped during extraction; the second call is
    # left with an unterminated f-string and must be restored from version
    # control before this module can run.
    st.markdown(""" """, unsafe_allow_html=True)
    st.markdown(f"
", unsafe_allow_html=True)
    tab1, tab2 = st.tabs(["Upload", "Analyze"])
    with tab1:
        st.subheader("File Management")
        # Uploads are decoded as UTF-8 and persisted per-user under the
        # 'semantic' namespace.
        uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader'))
        if uploaded_file is not None:
            # NOTE(review): .decode('utf-8') assumes text content — will raise
            # UnicodeDecodeError for binary formats (pdf/docx/odt); confirm the
            # storage layer expects raw text here.
            file_contents = uploaded_file.getvalue().decode('utf-8')
            if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents):
                st.success(f"File {uploaded_file.name} uploaded and saved successfully")
            else:
                st.error("Error uploading file")
        st.markdown("---")
        st.subheader("Manage Uploaded Files")
        user_files = get_user_files(st.session_state.username, 'semantic')
        if user_files:
            # One row per stored file: name on the left, delete button on the right.
            for file in user_files:
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.write(file['file_name'])
                with col2:
                    if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"):
                        if delete_file(st.session_state.username, file['file_name'], 'semantic'):
                            st.success(f"File {file['file_name']} deleted successfully")
                            # Rerun so the deleted file disappears from the list immediately.
                            st.rerun()
                        else:
                            st.error(f"Error deleting file {file['file_name']}")
        else:
            st.info("No files uploaded yet.")
    with tab2:
        st.subheader("Semantic Analysis")
        st.subheader("File Selection and Analysis")
        user_files = get_user_files(st.session_state.username, 'semantic')
        # First option is a translated "select a file" placeholder; the rest are
        # the user's stored file names.
        file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files]
        selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector'))
        if st.button("Analyze Document"):
            # Only analyze when a real file (not the placeholder) is selected.
            if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'):
                file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic')
                if file_contents:
                    with st.spinner("Analyzing..."):
                        try:
                            nlp_model = nlp_models[lang_code]
                            # process_semantic_analysis returns two base64-encoded
                            # graph images plus (concept, frequency) pairs.
                            concept_graph_base64, entity_graph_base64, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code)
                            st.session_state.current_file_contents = file_contents
                            st.success("Analysis completed successfully")
                            if concept_graph_base64:
                                # NOTE(review): the HTML wrapper of this floating-panel
                                # payload was stripped during extraction — only the
                                # interpolated concept/frequency list survives.
                                graph_content = f"""{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}
"""
                                # float_graph (from semantic_float68ok) shows the panel
                                # and returns an id used to manage it across reruns.
                                st.session_state.graph_id = float_graph(graph_content, width="800px", height="600px", position="center-right")
                                st.session_state.graph_visible = True
                                st.session_state.graph_content = graph_content
                            if entity_graph_base64:
                                # NOTE(review): source chunk is truncated here — the
                                # f-string below continues past the visible file.
                                entity_graph_content = f"""