#modules/semantic/semantic_interface.py
import streamlit as st
from streamlit_float import *
from streamlit_antd_components import *
from streamlit.components.v1 import html
import spacy_streamlit
import io
from io import BytesIO
import base64
import matplotlib.pyplot as plt
import pandas as pd
import re
import logging

# Logger configuration
logger = logging.getLogger(__name__)

# Local imports
from .semantic_process import (
    process_semantic_input,
    format_semantic_results
)
from ..utils.widget_utils import generate_unique_key
from ..database.semantic_mongo_db import store_student_semantic_result
from ..database.semantic_export import export_user_interactions


def display_semantic_interface(lang_code, nlp_models, semantic_t):
    """
    Interface for semantic analysis with horizontally aligned controls.

    Args:
        lang_code: language code used for the analysis.
        nlp_models: preloaded NLP models passed through to process_semantic_input.
        semantic_t: dictionary of translated UI strings for the semantic module.
    """
    # Keep the app on the semantic page
    st.session_state.page = 'semantic'

    # Initialize session state keys if they do not exist
    if 'semantic_file_content' not in st.session_state:
        st.session_state.semantic_file_content = None

    if 'semantic_analysis_done' not in st.session_state:
        st.session_state.semantic_analysis_done = False

    if 'semantic_analysis_counter' not in st.session_state:
        st.session_state.semantic_analysis_counter = 0

    # CSS styles to align the buttons
    st.markdown("""
        <style>
        .stButton > button {
            width: 100%;
            height: 38px;
        }
        .stUploadButton > button {
            width: 100%;
            height: 38px;
        }
        div.row-widget.stButton {
            margin-top: 1px;
            margin-bottom: 1px;
        }
        </style>
    """, unsafe_allow_html=True)

    try:
        # Main container with a fixed layout
        with st.container():
            # A single row for all the controls
            col_file, col_analyze, col_export, col_new = st.columns([4, 2, 2, 2])

            # Column 1: file upload
            with col_file:
                uploaded_file = st.file_uploader(
                    semantic_t.get('file_uploader', 'Upload TXT file'),
                    type=['txt'],
                    key=f"semantic_uploader_{st.session_state.semantic_analysis_counter}"
                )
                if uploaded_file is not None:
                    # Update the stored file content
                    file_content = uploaded_file.getvalue().decode('utf-8')
                    if file_content != st.session_state.semantic_file_content:
                        st.session_state.semantic_file_content = file_content
                        st.session_state.semantic_analysis_done = False

            # Column 2: analyze button
            with col_analyze:
                analyze_enabled = uploaded_file is not None and not st.session_state.semantic_analysis_done
                analyze_button = st.button(
                    semantic_t.get('analyze_button', 'Analyze Text'),
                    disabled=not analyze_enabled,
                    key=f"analyze_button_{st.session_state.semantic_analysis_counter}",
                    use_container_width=True
                )

            # Column 3: export button
            with col_export:
                export_button = st.button(
                    semantic_t.get('export_button', 'Export'),
                    disabled=not st.session_state.semantic_analysis_done,
                    key=f"export_button_{st.session_state.semantic_analysis_counter}",
                    use_container_width=True
                )

            # Column 4: new analysis button
            with col_new:
                new_button = st.button(
                    semantic_t.get('new_analysis', 'New Analysis'),
                    disabled=not st.session_state.semantic_analysis_done,
                    key=f"new_button_{st.session_state.semantic_analysis_counter}",
                    use_container_width=True
                )

        st.markdown("<hr style='margin: 1em 0; opacity: 0.3'>", unsafe_allow_html=True)
        # Run the analysis when the button is pressed
        if analyze_button and st.session_state.semantic_file_content:
            with st.spinner(semantic_t.get('processing', 'Processing...')):
                try:
                    analysis_result = process_semantic_input(
                        st.session_state.semantic_file_content,
                        lang_code,
                        nlp_models,
                        semantic_t
                    )

                    if analysis_result['success']:
                        # Store the results and update the state
                        st.session_state.semantic_result = analysis_result
                        st.session_state.semantic_analysis_done = True

                        # Save to the database
                        if store_student_semantic_result(
                            st.session_state.username,
                            st.session_state.semantic_file_content,
                            analysis_result['analysis']
                        ):
                            st.success(semantic_t.get('success_message', 'Analysis saved successfully'))
                            display_semantic_results(analysis_result, lang_code, semantic_t)
                        else:
                            st.error(semantic_t.get('error_message', 'Error saving analysis'))
                    else:
                        st.error(analysis_result['message'])
                except Exception as e:
                    logger.error(f"Analysis error: {str(e)}")
                    st.error(semantic_t.get('error_processing', f'Error: {str(e)}'))

        # Handle export
        if export_button and st.session_state.semantic_analysis_done:
            try:
                pdf_buffer = export_user_interactions(st.session_state.username, 'semantic')
                st.download_button(
                    label=semantic_t.get('download_pdf', 'Download PDF'),
                    data=pdf_buffer,
                    file_name="semantic_analysis.pdf",
                    mime="application/pdf",
                    key=f"download_{st.session_state.semantic_analysis_counter}"
                )
            except Exception as e:
                st.error(f"Error exporting: {str(e)}")

        # Handle new analysis
        if new_button:
            st.session_state.semantic_file_content = None
            st.session_state.semantic_analysis_done = False
            st.session_state.semantic_result = None
            st.session_state.semantic_analysis_counter += 1
            st.rerun()

        # Show existing results or the initial message
        if st.session_state.semantic_analysis_done and 'semantic_result' in st.session_state:
            display_semantic_results(st.session_state.semantic_result, lang_code, semantic_t)
        elif not uploaded_file:
            st.info(semantic_t.get('initial_message', 'Upload a TXT file to begin analysis'))

    except Exception as e:
        logger.error(f"General error: {str(e)}")
        st.error("Error in semantic interface. Please try again.")


def display_semantic_results(result, lang_code, semantic_t):
    """
    Displays the results of the semantic analysis.

    `result` is the dict produced by process_semantic_input; its 'analysis'
    entry is expected to provide 'key_concepts' (pairs of concept and
    frequency), 'concept_graph' and 'entity_graph' (image data accepted by
    st.image), and optionally 'entities' (a dict mapping entity types to
    lists of entities).
    """
    if result is None or not result['success']:
        st.warning(semantic_t.get('no_results', 'No results available'))
        return

    analysis = result['analysis']

    # Create tabs for the results
    tab1, tab2 = st.tabs([
        semantic_t.get('concepts_tab', 'Key Concepts Analysis'),
        semantic_t.get('entities_tab', 'Entities Analysis')
    ])

    # Tab 1: key concepts
    with tab1:
        col1, col2 = st.columns(2)

        # Column 1: list of concepts
        with col1:
            st.subheader(semantic_t.get('key_concepts', 'Key Concepts'))
            concept_text = "\n".join([
                f"• {concept} ({frequency:.2f})"
                for concept, frequency in analysis['key_concepts']
            ])
            st.markdown(concept_text)

        # Column 2: concept graph
        with col2:
            st.subheader(semantic_t.get('concept_graph', 'Concepts Graph'))
            st.image(analysis['concept_graph'])

    # Tab 2: entities
    with tab2:
        col1, col2 = st.columns(2)

        # Column 1: list of entities
        with col1:
            st.subheader(semantic_t.get('identified_entities', 'Identified Entities'))
            if 'entities' in analysis:
                for entity_type, entities in analysis['entities'].items():
                    st.markdown(f"**{entity_type}**")
                    st.markdown("• " + "\n• ".join(entities))

        # Column 2: entity graph
        with col2:
            st.subheader(semantic_t.get('entity_graph', 'Entities Graph'))
            st.image(analysis['entity_graph'])
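

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal example of how a page-level
# script might wire this interface up. `load_nlp_models` and `get_translations`
# are hypothetical placeholders, not functions defined in this module.
#
#   import streamlit as st
#   from modules.semantic.semantic_interface import display_semantic_interface
#
#   lang_code = st.session_state.get('lang_code', 'en')
#   nlp_models = load_nlp_models()            # e.g. dict of NLP pipelines keyed by language
#   semantic_t = get_translations(lang_code)  # dict of UI strings for this module
#   st.session_state.username = 'demo_user'   # required by the save and export paths
#   display_semantic_interface(lang_code, nlp_models, semantic_t)
# ---------------------------------------------------------------------------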