Update modules/studentact/student_activities_v2.py
modules/studentact/student_activities_v2.py
CHANGED
@@ -469,82 +469,14 @@ def display_morphosyntax_activities(username: str, t: dict):
 
 ###############################################################################################
 
-def display_semantic_activities(username: str, t: dict):
-    """Muestra actividades de análisis semántico"""
-    try:
-        logger.info(f"Recuperando análisis semántico para {username}")
-        analyses = get_student_semantic_analysis(username)
-
-        if not analyses:
-            logger.info("No se encontraron análisis semánticos")
-            st.info(t.get('no_semantic_analyses', 'No hay análisis semánticos registrados'))
-            return
-
-        logger.info(f"Procesando {len(analyses)} análisis semánticos")
-
-        for analysis in analyses:
-            try:
-                # Verificar campos necesarios
-                if not all(key in analysis for key in ['timestamp', 'concept_graph']):
-                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
-                    continue
-
-                # Formatear fecha
-                timestamp = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
-                formatted_date = timestamp.strftime("%d/%m/%Y %H:%M:%S")
-
-                # Crear expander
-                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {formatted_date}", expanded=False):
-                    # Procesar y mostrar gráfico
-                    if analysis.get('concept_graph'):
-                        try:
-                            # Convertir de base64 a bytes
-                            logger.debug("Decodificando gráfico de conceptos")
-                            image_data = analysis['concept_graph']
-
-                            # Si el gráfico ya es bytes, usarlo directamente
-                            if isinstance(image_data, bytes):
-                                image_bytes = image_data
-                            else:
-                                # Si es string base64, decodificar
-                                image_bytes = base64.b64decode(image_data)
-
-                            logger.debug(f"Longitud de bytes de imagen: {len(image_bytes)}")
-
-                            # Mostrar imagen
-                            st.image(
-                                image_bytes,
-                                caption=t.get('concept_network', 'Red de Conceptos'),
-                                use_container_width=True
-                            )
-                            logger.debug("Gráfico mostrado exitosamente")
-
-                        except Exception as img_error:
-                            logger.error(f"Error procesando gráfico: {str(img_error)}")
-                            st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
-                    else:
-                        st.info(t.get('no_graph', 'No hay visualización disponible'))
-
-            except Exception as e:
-                logger.error(f"Error procesando análisis individual: {str(e)}")
-                continue
-
-    except Exception as e:
-        logger.error(f"Error mostrando análisis semántico: {str(e)}")
-        st.error(t.get('error_semantic', 'Error al mostrar análisis semántico'))
-
-
-###################################################################################################
-
 def display_discourse_activities(username: str, t: dict):
-    """Muestra actividades de análisis del discurso
+    """Muestra actividades de análisis del discurso"""
     try:
         logger.info(f"Recuperando análisis del discurso para {username}")
         analyses = get_student_discourse_analysis(username)
 
         if not analyses:
             logger.info("No se encontraron análisis del discurso")
-            # Usamos el término "análisis comparado de textos" en la UI
             st.info(t.get('no_discourse_analyses', 'No hay análisis comparados de textos registrados'))
             return
 
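For reference, the removed display_semantic_activities accepted the stored concept graph either as raw bytes or as a base64 string before handing it to st.image. A minimal, self-contained sketch of that fallback (illustrative only; the helper name below is not part of the codebase):

# Illustrative sketch (not from this commit): the bytes-or-base64 fallback shown above.
import base64

def to_image_bytes(image_data) -> bytes:
    """Return raw image bytes whether the stored value is bytes or a base64 string."""
    if isinstance(image_data, bytes):
        return image_data                      # already bytes: use directly
    return base64.b64decode(image_data)        # base64 string: decode to bytes

png_header = b"\x89PNG\r\n\x1a\n"
assert to_image_bytes(png_header) == png_header
assert to_image_bytes(base64.b64encode(png_header).decode("ascii")) == png_header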
@@ -552,8 +484,8 @@ def display_discourse_activities(username: str, t: dict):
         for analysis in analyses:
             try:
                 # Verificar campos mínimos necesarios
-                if not
-                    logger.warning(f"Análisis
+                if 'timestamp' not in analysis:
+                    logger.warning(f"Análisis sin timestamp: {analysis.keys()}")
                     continue
 
                 # Formatear fecha
@@ -561,91 +493,63 @@ def display_discourse_activities(username: str, t: dict):
                 formatted_date = timestamp.strftime("%d/%m/%Y %H:%M:%S")
 
                 with st.expander(f"{t.get('analysis_date', 'Fecha')}: {formatted_date}", expanded=False):
-                    # Mostrar
-                    if '
-                    st.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        height=100,
-                        disabled=True,
-                        label_visibility="collapsed",
-                        key=f"text_area_2_{timestamp}"
-                    )
-
-                    # Mostrar conceptos clave
-                    st.subheader(t.get('key_concepts', 'Conceptos clave'))
-                    col1, col2 = st.columns(2)
-                    with col1:
-                        st.markdown(f"**{t.get('doc1_title', 'Documento 1')}**")
-                        if 'key_concepts1' in analysis:
-                            concepts1 = ", ".join([concept for concept, _ in analysis['key_concepts1']])
-                            st.markdown(concepts1)
-                        else:
-                            st.info(t.get('no_concepts', 'No hay conceptos disponibles'))
-
-                    with col2:
-                        st.markdown(f"**{t.get('doc2_title', 'Documento 2')}**")
-                        if 'key_concepts2' in analysis:
-                            concepts2 = ", ".join([concept for concept, _ in analysis['key_concepts2']])
-                            st.markdown(concepts2)
-                        else:
-                            st.info(t.get('no_concepts', 'No hay conceptos disponibles'))
+                    # Mostrar conceptos clave en fila
+                    if 'key_concepts1' in analysis and 'key_concepts2' in analysis:
+                        st.markdown("### Conceptos clave")
+
+                        # Documento 1
+                        st.markdown("**Documento 1**")
+                        if analysis['key_concepts1']:
+                            # Extraer solo los conceptos sin la frecuencia
+                            concepts1 = [concept for concept, _ in analysis['key_concepts1']]
+                            # Mostrar en formato de fila
+                            st.markdown(", ".join([f"**{concept}**" for concept in concepts1]))
+
+                        # Documento 2
+                        st.markdown("**Documento 2**")
+                        if analysis['key_concepts2']:
+                            # Extraer solo los conceptos sin la frecuencia
+                            concepts2 = [concept for concept, _ in analysis['key_concepts2']]
+                            # Mostrar en formato de fila
+                            st.markdown(", ".join([f"**{concept}**" for concept in concepts2]))
 
-                    # Mostrar
-                    st.
+                    # Mostrar gráficos
+                    st.markdown("### Visualización comparativa")
 
-                    #
-
+                    # Verificar y mostrar gráficos si existen
+                    graph_col1, graph_col2 = st.columns(2)
+                    has_graphs = False
 
-
-
-                        st.markdown(f"**{t.get('analysis_doc1', 'Análisis del primer texto')}**")
-                        if 'graph1' in analysis:
+                    with graph_col1:
+                        if 'graph1' in analysis and analysis['graph1']:
                             try:
-
-
+                                # Verificar que sea bytes
+                                if isinstance(analysis['graph1'], bytes):
+                                    st.image(analysis['graph1'],
+                                             caption="Documento 1",
+                                             use_container_width=True)  # Usar use_container_width en lugar de use_column_width
+                                    has_graphs = True
+                                else:
+                                    logger.warning(f"graph1 no es bytes: {type(analysis['graph1'])}")
                             except Exception as img_error:
-                                logger.error(f"Error
-                                st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
-                        else:
-                            st.info(t.get('no_visualization', 'No hay visualización disponible'))
+                                logger.error(f"Error mostrando graph1: {str(img_error)}")
 
-
-
-                        st.markdown(f"**{t.get('analysis_doc2', 'Análisis del segundo texto')}**")
-                        if 'graph2' in analysis:
+                    with graph_col2:
+                        if 'graph2' in analysis and analysis['graph2']:
                             try:
-
-
+                                # Verificar que sea bytes
+                                if isinstance(analysis['graph2'], bytes):
+                                    st.image(analysis['graph2'],
+                                             caption="Documento 2",
+                                             use_container_width=True)  # Usar use_container_width en lugar de use_column_width
+                                    has_graphs = True
+                                else:
+                                    logger.warning(f"graph2 no es bytes: {type(analysis['graph2'])}")
                             except Exception as img_error:
-                                logger.error(f"Error
-                                st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
-                        else:
-                            st.info(t.get('no_visualization', 'No hay visualización disponible'))
-
-                    # Añadir leyenda de interpretación
-                    st.markdown("### 📊 " + t.get('graph_interpretation', 'Interpretación del grafo:'))
+                                logger.error(f"Error mostrando graph2: {str(img_error)}")
 
-
-
-                    * 🎨 Los colores más intensos indican conceptos más centrales en el texto
-                    * ⭕ El tamaño de los nodos representa la frecuencia del concepto
-                    * ↔️ El grosor de las líneas indica la fuerza de la conexión
-                    """)
+                    if not has_graphs:
+                        st.info(t.get('no_visualization', 'No hay visualización comparativa disponible'))
 
             except Exception as e:
                 logger.error(f"Error procesando análisis individual: {str(e)}")
@@ -653,9 +557,11 @@ def display_discourse_activities(username: str, t: dict):
 
     except Exception as e:
         logger.error(f"Error mostrando análisis del discurso: {str(e)}")
-        # Usamos el término "análisis comparado de textos" en la UI
         st.error(t.get('error_discourse', 'Error al mostrar análisis comparado de textos'))
 
+
+
+
 #################################################################################
 
 def display_discourse_comparison(analysis: dict, t: dict):
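The new rendering path assumes each graph is stored as raw PNG bytes and passes them straight to st.image with use_container_width=True (use_column_width is deprecated in recent Streamlit releases). A minimal sketch of that flow, assuming Matplotlib produces the graphs (the commit itself does not show the generation side):

# Illustrative sketch only: produce PNG bytes and render them the way the updated
# display_discourse_activities does. Matplotlib here is an assumption; the diff only
# requires that the stored value already be bytes.
import io

import matplotlib
matplotlib.use("Agg")  # headless backend: render to memory, no window needed
import matplotlib.pyplot as plt
import streamlit as st

def fig_to_png_bytes(fig) -> bytes:
    """Serialize a Matplotlib figure to raw PNG bytes."""
    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    plt.close(fig)
    return buf.getvalue()

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
graph_bytes = fig_to_png_bytes(fig)

# Same guard and call as the new code: only raw bytes are rendered.
if isinstance(graph_bytes, bytes):
    st.image(graph_bytes, caption="Documento 1", use_container_width=True)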
|