|
|
|
|
|
|
|
import streamlit as st
|
|
import re
|
|
import io
|
|
from io import BytesIO
|
|
import pandas as pd
|
|
import numpy as np
|
|
import time
|
|
import matplotlib.pyplot as plt
|
|
from datetime import datetime, timedelta
|
|
from spacy import displacy
|
|
import random
|
|
import base64
|
|
import seaborn as sns
|
|
import logging
|
|
|
|
|
|
from ..database.morphosintax_mongo_db import get_student_morphosyntax_analysis
|
|
from ..database.semantic_mongo_db import get_student_semantic_analysis
|
|
from ..database.discourse_mongo_db import get_student_discourse_analysis
|
|
from ..database.chat_mongo_db import get_chat_history
|
|
from ..database.current_situation_mongo_db import get_current_situation_analysis
|
|
from ..database.claude_recommendations_mongo_db import get_claude_recommendations
|
|
|
|
|
|
from ..utils.widget_utils import generate_unique_key
|
|
|
|
# Module-level logger namespaced to this module (standard logging practice).
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
def display_student_activities(username: str, lang_code: str, t: dict):
    """
    Render every activity section for a student inside a tabbed layout.

    Args:
        username: Student identifier.
        lang_code: Language code (kept for interface compatibility).
        t: Translation dictionary.
    """
    try:
        st.header(t.get('activities_title', 'Mis Actividades'))

        # (label, renderer) pairs keep tab titles and their content in sync.
        sections = [
            (t.get('current_situation_activities', 'Mi Situación Actual'),
             display_current_situation_activities),
            (t.get('morpho_activities', 'Análisis Morfosintáctico'),
             display_morphosyntax_activities),
            (t.get('semantic_activities', 'Análisis Semántico'),
             display_semantic_activities),
            (t.get('discourse_activities', 'Análisis del Discurso'),
             display_discourse_activities),
            (t.get('chat_activities', 'Conversaciones con el Asistente'),
             display_chat_activities),
        ]

        tabs = st.tabs([label for label, _ in sections])
        for tab, (_, render) in zip(tabs, sections):
            with tab:
                render(username, t)

    except Exception as e:
        logger.error(f"Error mostrando actividades: {str(e)}")
        st.error(t.get('error_loading_activities', 'Error al cargar las actividades'))
|
|
|
|
|
|
|
|
|
|
def display_current_situation_activities(username: str, t: dict):
    """
    Show current-situation analyses together with Claude recommendations,
    unifying both collections and pairing entries by temporal proximity.

    Args:
        username: Student identifier.
        t: Translation dictionary.
    """
    try:
        logger.info(f"Recuperando análisis de situación actual para {username}")
        situation_analyses = get_current_situation_analysis(username, limit=10)

        if situation_analyses:
            logger.info(f"Recuperados {len(situation_analyses)} análisis de situación")
            for i, analysis in enumerate(situation_analyses):
                logger.info(f"Análisis #{i+1}: Claves disponibles: {list(analysis.keys())}")
                if 'metrics' in analysis:
                    logger.info(f"Métricas disponibles: {list(analysis['metrics'].keys())}")
        else:
            logger.warning("No se encontraron análisis de situación actual")

        logger.info(f"Recuperando recomendaciones de Claude para {username}")
        claude_recommendations = get_claude_recommendations(username)

        if claude_recommendations:
            logger.info(f"Recuperadas {len(claude_recommendations)} recomendaciones de Claude")
        else:
            logger.warning("No se encontraron recomendaciones de Claude")

        if not situation_analyses and not claude_recommendations:
            logger.info("No se encontraron análisis de situación actual ni recomendaciones")
            st.info(t.get('no_current_situation', 'No hay análisis de situación actual registrados'))
            return

        logger.info("Creando emparejamientos temporales de análisis")
        combined_items = _pair_situations_with_recommendations(
            situation_analyses or [], claude_recommendations or []
        )

        # Newest first; ISO-8601 keys sort correctly as plain strings.
        combined_items.sort(key=lambda x: x[0], reverse=True)
        logger.info(f"Procesando {len(combined_items)} elementos combinados")

        for i, (timestamp_key, analysis_pair) in enumerate(combined_items):
            try:
                _render_analysis_pair(i, timestamp_key, analysis_pair, t)
            except Exception as e:
                logger.error(f"Error procesando par de análisis: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando actividades de situación actual: {str(e)}")
        st.error(t.get('error_current_situation', 'Error al mostrar análisis de situación actual'))


def _parse_timestamped_items(items, label):
    """Return [(datetime, item)] for the items carrying a parseable ISO 'timestamp'."""
    parsed = []
    for item in items:
        if 'timestamp' not in item:
            continue
        try:
            dt = datetime.fromisoformat(item['timestamp'].replace('Z', '+00:00'))
            parsed.append((dt, item))
        except Exception as e:
            logger.error(f"Error parseando timestamp de {label}: {str(e)}")
    return parsed


def _pair_situations_with_recommendations(situation_analyses, claude_recommendations):
    """
    Greedily pair each situation analysis (newest first) with the closest
    Claude recommendation within a 30-minute window; leftovers stand alone.

    Returns:
        List of (iso_timestamp_key, {'situation': ..., 'recommendation': ...,
        'time_diff': seconds}) tuples; unmatched items omit the missing keys.
    """
    situation_times = _parse_timestamped_items(situation_analyses, "situación")
    recommendation_times = _parse_timestamped_items(claude_recommendations, "recomendación")

    situation_times.sort(key=lambda x: x[0], reverse=True)
    recommendation_times.sort(key=lambda x: x[0], reverse=True)

    combined_items = []

    for sit_time, situation in situation_times:
        best_match = None
        best_rec_time = None
        min_diff = timedelta(minutes=30)  # maximum pairing window

        for rec_time, recommendation in recommendation_times:
            time_diff = abs(sit_time - rec_time)
            if time_diff < min_diff:
                min_diff = time_diff
                best_match = recommendation
                best_rec_time = rec_time

        timestamp_key = sit_time.isoformat()
        if best_match:
            combined_items.append((timestamp_key, {
                'situation': situation,
                'recommendation': best_match,
                'time_diff': min_diff.total_seconds()
            }))
            # Consume the matched recommendation so it cannot pair twice.
            recommendation_times = [
                (rt, rec) for rt, rec in recommendation_times if rt != best_rec_time
            ]
            logger.info(f"Emparejado: Diagnóstico {sit_time} con Recomendación {best_rec_time} (diferencia: {min_diff})")
        else:
            combined_items.append((timestamp_key, {'situation': situation}))
            logger.info(f"Sin emparejar: Diagnóstico {sit_time} sin recomendación cercana")

    # Any recommendation left unmatched is shown on its own.
    for rec_time, recommendation in recommendation_times:
        combined_items.append((rec_time.isoformat(), {'recommendation': recommendation}))
        logger.info(f"Sin emparejar: Recomendación {rec_time} sin diagnóstico cercano")

    return combined_items


def _extract_metric_score(metric_data):
    """
    Pull a displayable score out of one metric entry, or return None.

    Dict entries are probed for 'normalized_score' / 'score' / 'value';
    bare numbers are used as-is.
    """
    if isinstance(metric_data, dict):
        for key in ('normalized_score', 'score', 'value'):
            if key in metric_data:
                return metric_data[key]
        return None
    if isinstance(metric_data, (int, float)):
        # BUGFIX: this case was previously chained as an `elif` inside the
        # dict branch, making it unreachable — bare numeric metrics were
        # silently dropped. It is now a top-level alternative.
        return metric_data
    return None


def _render_analysis_pair(index, timestamp_key, analysis_pair, t):
    """Render one expander: analyzed text plus diagnosis/recommendation tabs."""
    situation_data = analysis_pair.get('situation', {})
    recommendation_data = analysis_pair.get('recommendation', {})
    time_diff = analysis_pair.get('time_diff')

    if not situation_data and not recommendation_data:
        return

    # Prefer the situation's text/type; fall back to the recommendation's.
    text_to_show = situation_data.get('text', recommendation_data.get('text', ''))
    text_type = situation_data.get('text_type', recommendation_data.get('text_type', ''))

    try:
        dt = datetime.fromisoformat(timestamp_key)
        formatted_date = dt.strftime("%d/%m/%Y %H:%M:%S")
    except Exception as date_error:
        logger.error(f"Error formateando fecha: {str(date_error)}")
        formatted_date = timestamp_key

    title = f"{t.get('analysis_date', 'Fecha')}: {formatted_date}"
    if text_type:
        text_type_display = {
            'academic_article': t.get('academic_article', 'Artículo académico'),
            'student_essay': t.get('student_essay', 'Trabajo universitario'),
            'general_communication': t.get('general_communication', 'Comunicación general')
        }.get(text_type, text_type)
        title += f" - {text_type_display}"

    if time_diff is not None:
        if time_diff < 60:
            title += " 🔄 (emparejados)"
        else:
            title += f" 🔄 (emparejados, diferencia: {int(time_diff//60)} min)"

    # Unique widget key so multiple expanders don't collide.
    expander_id = f"analysis_{index}_{timestamp_key.replace(':', '_')}"

    with st.expander(title, expanded=False):
        st.subheader(t.get('analyzed_text', 'Texto analizado'))
        st.text_area(
            "Text Content",
            value=text_to_show,
            height=100,
            disabled=True,
            label_visibility="collapsed",
            key=f"text_area_{expander_id}"
        )

        diagnosis_tab, recommendations_tab = st.tabs([
            t.get('diagnosis_tab', 'Diagnóstico'),
            t.get('recommendations_tab', 'Recomendaciones')
        ])

        with diagnosis_tab:
            _render_diagnosis(situation_data, t)

        with recommendations_tab:
            _render_recommendations(recommendation_data, t)


def _render_diagnosis(situation_data, t):
    """Render the metric score cards (left) and raw metric details (right)."""
    if not (situation_data and 'metrics' in situation_data):
        st.info(t.get('no_diagnosis', 'No hay datos de diagnóstico disponibles'))
        return

    metrics = situation_data['metrics']
    col1, col2 = st.columns(2)

    with col1:
        st.subheader(t.get('key_metrics', 'Métricas clave'))
        for metric_name, metric_data in metrics.items():
            try:
                score = _extract_metric_score(metric_data)
                if score is None:
                    continue
                if isinstance(score, (int, float)):
                    # Traffic-light thresholds: <0.5 red, <0.75 yellow, else green.
                    if score < 0.5:
                        emoji, color = "🔴", "#ffcccc"
                    elif score < 0.75:
                        emoji, color = "🟡", "#ffffcc"
                    else:
                        emoji, color = "🟢", "#ccffcc"
                    st.markdown(f"""
                    <div style="background-color:{color}; padding:10px; border-radius:5px; margin-bottom:10px;">
                        <b>{emoji} {metric_name.capitalize()}:</b> {score:.2f}
                    </div>
                    """, unsafe_allow_html=True)
                else:
                    # Non-numeric score: show it verbatim on a neutral card.
                    st.markdown(f"""
                    <div style="background-color:#f0f0f0; padding:10px; border-radius:5px; margin-bottom:10px;">
                        <b>ℹ️ {metric_name.capitalize()}:</b> {str(score)}
                    </div>
                    """, unsafe_allow_html=True)
            except Exception as e:
                logger.error(f"Error procesando métrica {metric_name}: {str(e)}")

    with col2:
        st.subheader(t.get('details', 'Detalles'))
        for metric_name, metric_data in metrics.items():
            try:
                if not isinstance(metric_data, dict):
                    continue
                # Prefer an explicit 'details' payload; otherwise show every
                # field that is not one of the score aliases.
                details = metric_data['details'] if metric_data.get('details') else {
                    k: v for k, v in metric_data.items()
                    if k not in ['normalized_score', 'score', 'value']
                }
                if details:
                    st.write(f"**{metric_name.capitalize()}**")
                    st.json(details, expanded=False)
            except Exception as e:
                logger.error(f"Error mostrando detalles de {metric_name}: {str(e)}")


def _render_recommendations(recommendation_data, t):
    """Render Claude's recommendation (or legacy 'feedback') text, if any."""
    body = None
    if recommendation_data and 'recommendations' in recommendation_data:
        body = recommendation_data['recommendations']
    elif recommendation_data and 'feedback' in recommendation_data:
        body = recommendation_data['feedback']

    if body is None:
        st.info(t.get('no_recommendations', 'No hay recomendaciones disponibles'))
        return

    st.markdown(f"""
    <div style="padding: 20px; border-radius: 10px;
                background-color: #f8f9fa; margin-bottom: 20px;">
        {body}
    </div>
    """, unsafe_allow_html=True)
|
|
|
|
|
|
|
|
def display_morphosyntax_activities(username: str, t: dict):
    """Render the student's stored morphosyntactic analyses, one expander each."""
    try:
        analyses = get_student_morphosyntax_analysis(username)
        if not analyses:
            st.info(t.get('no_morpho_analyses', 'No hay análisis morfosintácticos registrados'))
            return

        for item in analyses:
            header = f"{t.get('analysis_date', 'Fecha')}: {item['timestamp']}"
            with st.expander(header, expanded=False):
                st.text(f"{t.get('analyzed_text', 'Texto analizado')}:")
                st.write(item['text'])

                # Arc diagrams are stored as HTML snippets (presumably displaCy
                # output — confirm against the writer side), rendered raw.
                if 'arc_diagrams' in item:
                    st.subheader(t.get('syntactic_diagrams', 'Diagramas sintácticos'))
                    for html_snippet in item['arc_diagrams']:
                        st.write(html_snippet, unsafe_allow_html=True)

    except Exception as e:
        logger.error(f"Error mostrando análisis morfosintáctico: {str(e)}")
        st.error(t.get('error_morpho', 'Error al mostrar análisis morfosintáctico'))
|
|
|
|
|
|
|
|
|
|
def display_semantic_activities(username: str, t: dict):
    """Render the student's semantic analyses as concept-graph images."""
    try:
        logger.info(f"Recuperando análisis semántico para {username}")
        analyses = get_student_semantic_analysis(username)

        if not analyses:
            logger.info("No se encontraron análisis semánticos")
            st.info(t.get('no_semantic_analyses', 'No hay análisis semánticos registrados'))
            return

        logger.info(f"Procesando {len(analyses)} análisis semánticos")

        for analysis in analyses:
            try:
                # Skip documents missing either required field.
                if any(key not in analysis for key in ('timestamp', 'concept_graph')):
                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
                    continue

                stamp = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                label = f"{t.get('analysis_date', 'Fecha')}: {stamp.strftime('%d/%m/%Y %H:%M:%S')}"

                with st.expander(label, expanded=False):
                    graph_payload = analysis.get('concept_graph')
                    if not graph_payload:
                        st.info(t.get('no_graph', 'No hay visualización disponible'))
                        continue
                    try:
                        logger.debug("Decodificando gráfico de conceptos")
                        # Payload may be raw bytes or a base64-encoded string.
                        if isinstance(graph_payload, bytes):
                            image_bytes = graph_payload
                        else:
                            image_bytes = base64.b64decode(graph_payload)
                        logger.debug(f"Longitud de bytes de imagen: {len(image_bytes)}")

                        st.image(
                            image_bytes,
                            caption=t.get('concept_network', 'Red de Conceptos'),
                            use_column_width=True
                        )
                        logger.debug("Gráfico mostrado exitosamente")
                    except Exception as img_error:
                        logger.error(f"Error procesando gráfico: {str(img_error)}")
                        st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))

            except Exception as e:
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis semántico: {str(e)}")
        st.error(t.get('error_semantic', 'Error al mostrar análisis semántico'))
|
|
|
|
|
|
|
|
def display_discourse_activities(username: str, t: dict):
    """Render the student's discourse analyses (comparative combined graphs)."""
    try:
        logger.info(f"Recuperando análisis del discurso para {username}")
        analyses = get_student_discourse_analysis(username)

        if not analyses:
            logger.info("No se encontraron análisis del discurso")
            st.info(t.get('no_discourse_analyses', 'No hay análisis del discurso registrados'))
            return

        logger.info(f"Procesando {len(analyses)} análisis del discurso")
        for analysis in analyses:
            try:
                # Skip documents missing either required field.
                if any(key not in analysis for key in ('timestamp', 'combined_graph')):
                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
                    continue

                stamp = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                label = f"{t.get('analysis_date', 'Fecha')}: {stamp.strftime('%d/%m/%Y %H:%M:%S')}"

                with st.expander(label, expanded=False):
                    graph_b64 = analysis['combined_graph']
                    if not graph_b64:
                        st.info(t.get('no_visualization', 'No hay visualización comparativa disponible'))
                        continue
                    logger.debug("Decodificando gráfico combinado")
                    try:
                        st.image(base64.b64decode(graph_b64), use_column_width=True)
                        logger.debug("Gráfico mostrado exitosamente")
                    except Exception as img_error:
                        logger.error(f"Error decodificando imagen: {str(img_error)}")
                        st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))

            except Exception as e:
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis del discurso: {str(e)}")
        st.error(t.get('error_discourse', 'Error al mostrar análisis del discurso'))
|
|
|
|
|
|
def display_chat_activities(username: str, t: dict):
    """Render the student's sidebar-assistant chat history."""
    try:
        chat_history = get_chat_history(
            username=username,
            analysis_type='sidebar',
            limit=50
        )

        if not chat_history:
            st.info(t.get('no_chat_history', 'No hay conversaciones registradas'))
            return

        # reversed() so the last conversation returned is shown first.
        for chat in reversed(chat_history):
            try:
                stamp = datetime.fromisoformat(chat['timestamp'].replace('Z', '+00:00'))
                label = (
                    f"{t.get('chat_date', 'Fecha de conversación')}: "
                    f"{stamp.strftime('%d/%m/%Y %H:%M:%S')}"
                )

                with st.expander(label, expanded=False):
                    messages = chat['messages'] if 'messages' in chat else None
                    if not messages:
                        st.warning(t.get('invalid_chat_format', 'Formato de chat no válido'))
                        continue
                    for message in messages:
                        with st.chat_message(message.get('role', 'unknown')):
                            st.markdown(message.get('content', ''))
                        st.divider()

            except Exception as e:
                logger.error(f"Error mostrando conversación: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando historial del chat: {str(e)}")
        st.error(t.get('error_chat', 'Error al mostrar historial del chat'))
|
|
|
|
|
|
def display_discourse_comparison(analysis: dict, t: dict):
    """
    Show side-by-side key-concept tables for the two compared texts.

    Args:
        analysis: Discourse comparison document; expected to carry
            'key_concepts1' and 'key_concepts2' (missing keys render as
            empty tables instead of crashing).
        t: Translation dictionary.
    """
    try:
        st.subheader(t.get('comparison_results', 'Resultados de la comparación'))

        col1, col2 = st.columns(2)
        with col1:
            st.markdown(f"**{t.get('concepts_text_1', 'Conceptos Texto 1')}**")
            # .get with a default avoids a KeyError on partial documents.
            st.dataframe(pd.DataFrame(analysis.get('key_concepts1', [])))

        with col2:
            st.markdown(f"**{t.get('concepts_text_2', 'Conceptos Texto 2')}**")
            st.dataframe(pd.DataFrame(analysis.get('key_concepts2', [])))

    except Exception as e:
        # Consistent with the other display_* helpers in this module:
        # log the failure and surface a translated error instead of crashing.
        logger.error(f"Error mostrando comparación del discurso: {str(e)}")
        st.error(t.get('error_discourse', 'Error al mostrar análisis del discurso'))