File size: 3,368 Bytes
26b4a3d
91b2c3c
ad3217c
 
 
 
26b4a3d
 
 
 
 
 
 
ad3217c
26b4a3d
 
ad3217c
26b4a3d
ad3217c
26b4a3d
ad3217c
 
 
 
26b4a3d
 
 
 
 
91b2c3c
26b4a3d
91b2c3c
 
26b4a3d
 
91b2c3c
 
 
 
 
 
 
 
 
 
 
 
 
 
26b4a3d
 
 
 
 
 
 
 
91b2c3c
26b4a3d
 
 
91b2c3c
26b4a3d
 
 
 
 
 
91b2c3c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26b4a3d
91b2c3c
 
 
 
 
26b4a3d
91b2c3c
 
 
 
 
 
26b4a3d
 
 
 
 
91b2c3c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# modules/semantic/semantic_process.py
import streamlit as st
from ..text_analysis.semantic_analysis import (
    perform_semantic_analysis,
    fig_to_bytes,
    fig_to_html,
    identify_key_concepts,
    create_concept_graph,
    visualize_concept_graph,
    create_entity_graph,
    visualize_entity_graph,
    create_topic_graph,
    visualize_topic_graph,
    generate_summary,
    extract_entities,
    analyze_sentiment,
    extract_topics
)
from ..database.semantic_mongo_db import store_student_semantic_result

import logging
logger = logging.getLogger(__name__)

def process_semantic_input(text, lang_code, nlp_models, t):
    """
    Run the semantic analysis pipeline on the submitted text.

    Args:
        text: Raw text entered by the student.
        lang_code: Language key selecting the spaCy model in ``nlp_models``.
        nlp_models: Mapping of language code -> loaded NLP model.
        t: Translation dict used to localize user-facing messages.

    Returns:
        dict with keys ``analysis`` (result or None), ``success`` (bool)
        and ``message`` (localized status or error text).
    """
    try:
        # Lazy %-style args so the message is only formatted if emitted.
        logger.info("Iniciando análisis semántico para texto de %d caracteres", len(text))

        # Fix: the previous `doc = nlp_models[lang_code](text)` ran the full
        # NLP pipeline and discarded the result — perform_semantic_analysis
        # receives the model and the text and processes it itself, so the
        # text was analyzed twice. The unused call is removed.
        analysis = perform_semantic_analysis(text, nlp_models[lang_code], lang_code)

        logger.info("Análisis semántico completado. Guardando resultados...")

        # Persist the result; storage failure is logged but must not abort
        # the analysis (best-effort save).
        try:
            store_result = store_student_semantic_result(
                st.session_state.username,
                text,
                analysis
            )
            if not store_result:
                logger.warning("No se pudo guardar el análisis en la base de datos")
        except Exception as db_error:
            # logger.exception records the traceback along with the message.
            logger.exception("Error al guardar en base de datos: %s", db_error)
            # Continue even though the save failed.

        return {
            'analysis': analysis,
            'success': True,
            'message': t.get('success_message', 'Analysis completed successfully')
        }

    except Exception as e:
        logger.exception("Error en process_semantic_input: %s", e)
        return {
            'analysis': None,
            'success': False,
            'message': str(e)
        }

def format_semantic_results(analysis_result, t):
    """
    Turn a semantic-analysis result dict into display-ready output.

    Args:
        analysis_result: dict produced by ``process_semantic_input`` with
            ``success``, ``analysis`` and ``message`` keys.
        t: Translation dict used for section headings.

    Returns:
        dict with ``formatted_text`` (markdown string) and
        ``visualizations`` (graph figures, or None on failure).
    """
    try:
        # Guard clause: a failed analysis only carries its error message.
        if not analysis_result['success']:
            return {
                'formatted_text': analysis_result['message'],
                'visualizations': None
            }

        analysis = analysis_result['analysis']
        sections = []

        # Key concepts rendered as a markdown bullet list under a heading.
        if 'key_concepts' in analysis:
            heading = f"### {t.get('key_concepts', 'Key Concepts')}"
            bullets = [
                f"- {name}: {freq:.2f}"
                for name, freq in analysis['key_concepts']
            ]
            sections.append('\n'.join([heading] + bullets))

        return {
            'formatted_text': '\n\n'.join(sections),
            'visualizations': {
                'concept_graph': analysis.get('concept_graph'),
                'entity_graph': analysis.get('entity_graph')
            }
        }

    except Exception as e:
        logger.error(f"Error en format_semantic_results: {str(e)}")
        return {
            'formatted_text': str(e),
            'visualizations': None
        }

# Public API of this module: only the process/format pair is exported.
__all__ = [
    'process_semantic_input',
    'format_semantic_results'
]