|
|
|
import streamlit as st |
|
from streamlit_float import * |
|
from streamlit_antd_components import * |
|
from streamlit.components.v1 import html |
|
import spacy_streamlit |
|
import io |
|
from io import BytesIO |
|
import base64 |
|
import matplotlib.pyplot as plt |
|
import pandas as pd |
|
import re |
|
import logging |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
from .semantic_process import ( |
|
process_semantic_input, |
|
format_semantic_results |
|
) |
|
|
|
from ..utils.widget_utils import generate_unique_key |
|
from ..database.semantic_mongo_db import store_student_semantic_result |
|
from ..database.chat_mongo_db import store_chat_history, get_chat_history |
|
|
|
|
|
|
|
|
|
|
|
def display_semantic_interface(lang_code, nlp_models, semantic_t):
    """
    Render the semantic-analysis interface.

    Args:
        lang_code: Current language code (forwarded to the analysis pipeline).
        nlp_models: Loaded spaCy models (forwarded to process_semantic_input).
        semantic_t: Dict of translated UI strings for the semantic section.
    """
    try:
        # Per-session state for this tab: analysis_count is also used to
        # derive widget keys, so bumping it forces fresh widgets on rerun.
        if 'semantic_state' not in st.session_state:
            st.session_state.semantic_state = {
                'analysis_count': 0,
                'last_analysis': None,
                'current_file': None
            }

        st.info(semantic_t.get('initial_instruction',
            'Para comenzar un nuevo análisis semántico, cargue un archivo de texto (.txt)'))

        # Keying the uploader by analysis_count clears the previous file
        # after each completed analysis (new key -> new widget instance).
        uploaded_file = st.file_uploader(
            semantic_t.get('semantic_file_uploader', 'Upload a text file for semantic analysis'),
            type=['txt'],
            key=f"semantic_file_uploader_{st.session_state.semantic_state['analysis_count']}"
        )

        col1, col2 = st.columns([1,4])

        with col1:
            # Button stays disabled until a file has been uploaded.
            analyze_button = st.button(
                semantic_t.get('semantic_analyze_button', 'Analyze'),
                key=f"semantic_analyze_button_{st.session_state.semantic_state['analysis_count']}",
                type="primary",
                icon="🔍",
                disabled=uploaded_file is None,
                use_container_width=True
            )

        if analyze_button and uploaded_file is not None:
            try:
                with st.spinner(semantic_t.get('processing', 'Processing...')):
                    # Assumes the uploaded .txt is UTF-8; a decode failure
                    # is surfaced through the generic error path below.
                    text_content = uploaded_file.getvalue().decode('utf-8')

                    analysis_result = process_semantic_input(
                        text_content,
                        lang_code,
                        nlp_models,
                        semantic_t
                    )

                    if analysis_result['success']:
                        # Cache the result and bump the counter so the
                        # uploader/button get new keys on the next rerun.
                        st.session_state.semantic_result = analysis_result
                        st.session_state.semantic_state['analysis_count'] += 1
                        st.session_state.semantic_state['current_file'] = uploaded_file.name

                        # Persist to the database before rendering results.
                        if store_student_semantic_result(
                            st.session_state.username,
                            text_content,
                            analysis_result['analysis']
                        ):
                            st.success(
                                semantic_t.get('analysis_complete',
                                    'Análisis completado y guardado. Para realizar un nuevo análisis, cargue otro archivo.')
                            )

                            display_semantic_results(
                                st.session_state.semantic_result,
                                lang_code,
                                semantic_t
                            )
                        else:
                            st.error(semantic_t.get('error_message', 'Error saving analysis'))
                    else:
                        st.error(analysis_result['message'])

            except Exception as e:
                logger.error(f"Error en análisis semántico: {str(e)}")
                st.error(semantic_t.get('error_processing', f'Error processing text: {str(e)}'))

        # No fresh analysis requested: re-render the last stored result.
        elif 'semantic_result' in st.session_state and st.session_state.semantic_result is not None:

            st.info(
                semantic_t.get('current_analysis_message',
                    f'Mostrando análisis del archivo: {st.session_state.semantic_state["current_file"]}. '
                    'Para realizar un nuevo análisis, cargue otro archivo.')
            )

            display_semantic_results(
                st.session_state.semantic_result,
                lang_code,
                semantic_t
            )
        else:
            st.info(semantic_t.get('upload_prompt', 'Cargue un archivo para comenzar el análisis'))

    except Exception as e:
        # Top-level boundary: log and show a generic, translated message.
        logger.error(f"Error general en interfaz semántica: {str(e)}")
        st.error(semantic_t.get('general_error', "Se produjo un error. Por favor, intente de nuevo."))
|
|
|
|
|
def display_semantic_results(semantic_result, lang_code, semantic_t):
    """
    Display the results of a semantic key-concept analysis.

    Args:
        semantic_result: Dict from process_semantic_input; expected keys are
            'success' and 'analysis' (with 'key_concepts' — (concept, freq)
            pairs — and 'concept_graph' — raw PNG bytes, or None).
        lang_code: Current language code (kept for interface symmetry with
            display_semantic_interface; not used here).
        semantic_t: Dict of translated UI strings for the semantic section.
    """
    # Stdlib escaping helper, imported locally because the module-level
    # name `html` is bound to streamlit.components.v1.html.
    from html import escape

    # .get() instead of [] so a malformed result dict degrades to the
    # "no results" warning instead of raising KeyError.
    if semantic_result is None or not semantic_result.get('success'):
        st.warning(semantic_t.get('no_results', 'No results available'))
        return

    analysis = semantic_result['analysis']

    # ---------- Key concepts ----------
    st.subheader(semantic_t.get('key_concepts', 'Key Concepts'))
    if 'key_concepts' in analysis and analysis['key_concepts']:
        df = pd.DataFrame(
            analysis['key_concepts'],
            columns=[
                semantic_t.get('concept', 'Concept'),
                semantic_t.get('frequency', 'Frequency')
            ]
        )

        # Concepts originate from user-uploaded text, so escape them before
        # injecting into HTML rendered with unsafe_allow_html=True.
        concept_items = ''.join(
            f'<div class="concept-item"><span class="concept-name">{escape(str(concept))}</span>'
            f'<span class="concept-freq">({freq:.2f})</span></div>'
            for concept, freq in df.values
        )

        st.write(
            """
            <style>
            .concept-table {
                display: flex;
                flex-wrap: wrap;
                gap: 10px;
                margin-bottom: 20px;
            }
            .concept-item {
                background-color: #f0f2f6;
                border-radius: 5px;
                padding: 8px 12px;
                display: flex;
                align-items: center;
                gap: 8px;
            }
            .concept-name {
                font-weight: bold;
            }
            .concept-freq {
                color: #666;
                font-size: 0.9em;
            }
            </style>
            <div class="concept-table">
            """ + concept_items + "</div>",
            unsafe_allow_html=True
        )
    else:
        st.info(semantic_t.get('no_concepts', 'No key concepts found'))

    # ---------- Concept graph ----------
    st.subheader(semantic_t.get('concept_graph', 'Concepts Graph'))
    if 'concept_graph' in analysis and analysis['concept_graph'] is not None:
        try:
            st.markdown(
                """
                <style>
                .graph-container {
                    background-color: white;
                    border-radius: 10px;
                    padding: 20px;
                    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
                    margin: 10px 0;
                }
                .button-container {
                    display: flex;
                    gap: 10px;
                    margin: 10px 0;
                }
                </style>
                """,
                unsafe_allow_html=True
            )

            with st.container():
                st.markdown('<div class="graph-container">', unsafe_allow_html=True)

                # The graph arrives as raw PNG bytes; inline it as base64
                # so it renders inside the styled container div.
                graph_bytes = analysis['concept_graph']
                graph_base64 = base64.b64encode(graph_bytes).decode()
                st.markdown(
                    f'<img src="data:image/png;base64,{graph_base64}" alt="Concept Graph" style="width:100%;"/>',
                    unsafe_allow_html=True
                )

                st.caption(semantic_t.get(
                    'graph_description',
                    'Visualización de relaciones entre conceptos clave identificados en el texto.'
                ))

                st.markdown('</div>', unsafe_allow_html=True)

                # Download button in a narrow column so it doesn't stretch.
                col1, col2 = st.columns([1,4])
                with col1:
                    st.download_button(
                        label="📥 " + semantic_t.get('download_graph', "Download"),
                        data=graph_bytes,
                        file_name="semantic_graph.png",
                        mime="image/png",
                        use_container_width=True
                    )

                # Legend explaining how to read the graph.
                with st.expander("📊 " + semantic_t.get('graph_help', "Graph Interpretation")):
                    st.markdown("""
                    - 🔀 Las flechas indican la dirección de la relación entre conceptos
                    - 🎨 Los colores más intensos indican conceptos más centrales en el texto
                    - ⭕ El tamaño de los nodos representa la frecuencia del concepto
                    - ↔️ El grosor de las líneas indica la fuerza de la conexión
                    """)

        except Exception as e:
            logger.error(f"Error displaying graph: {str(e)}")
            st.error(semantic_t.get('graph_error', 'Error displaying the graph'))
    else:
        st.info(semantic_t.get('no_graph', 'No concept graph available'))
|
|
|
|
|
|
|
# NOTE(review): dead code, disabled by wrapping it in a bare string literal
# so it never executes. It references export_user_interactions, which is not
# imported in this file — re-enabling it as-is would raise NameError.
# TODO: either restore with the proper import or delete.
'''
# Botón de exportación al final
if 'semantic_analysis_counter' in st.session_state:
    col1, col2, col3 = st.columns([2,1,2])
    with col2:
        if st.button(
            semantic_t.get('export_button', 'Export Analysis'),
            key=f"semantic_export_{st.session_state.semantic_analysis_counter}",
            use_container_width=True
        ):
            pdf_buffer = export_user_interactions(st.session_state.username, 'semantic')
            st.download_button(
                label=semantic_t.get('download_pdf', 'Download PDF'),
                data=pdf_buffer,
                file_name="semantic_analysis.pdf",
                mime="application/pdf",
                key=f"semantic_download_{st.session_state.semantic_analysis_counter}"
            )
'''