# General imports
import streamlit as st
import re
import io
from io import BytesIO
import base64
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import time
from datetime import datetime
from streamlit_player import st_player  # Requires installing this library: pip install streamlit-player
from spacy import displacy
import logging
import random


from ..utils.widget_utils import generate_unique_key

from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
from ..database.chat_db import store_chat_history
from ..database.morphosintaxis_export import export_user_interactions

logger = logging.getLogger(__name__)
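
# NOTE: `translations`, `perform_semantic_analysis` and `store_semantic_result` are
# used below but are not imported in this file; they are assumed to be provided by
# sibling modules of this package (a translations table plus the semantic analysis
# and storage helpers).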


def display_semantic_analysis_interface(nlp_models, lang_code):
    """Render the semantic analysis interface: text input, file upload, analysis and result storage."""
    t = translations[lang_code]

    st.header(t['title'])

    # Option to enter text
    text_input = st.text_area(
        t['text_input_label'],
        height=150,
        placeholder=t['text_input_placeholder'],
    )

    # Option to upload a file
    uploaded_file = st.file_uploader(t['file_uploader'], type=['txt'])

    if st.button(t['analyze_button']):
        if text_input or uploaded_file is not None:
            if uploaded_file:
                text_content = uploaded_file.getvalue().decode('utf-8')
            else:
                text_content = text_input

            # Run the analysis
            analysis_result = perform_semantic_analysis(text_content, nlp_models[lang_code], lang_code)

            # Save the result in session state
            st.session_state.semantic_result = analysis_result

            # Display results
            display_semantic_results(st.session_state.semantic_result, lang_code, t)

            # Save the analysis result
            if store_semantic_result(st.session_state.username, text_content, analysis_result):
                st.success(t['success_message'])
            else:
                st.error(t['error_message'])
        else:
            st.warning(t['warning_message'])

    elif 'semantic_result' in st.session_state:

        # If there is a saved result, display it
        display_semantic_results(st.session_state.semantic_result, lang_code, t)
    
    else:
        st.info(t['initial_message'])  # Make sure 'initial_message' is in your translations

def display_semantic_results(result, lang_code, t):
    """Display the key concepts and the conceptual relations graph of a semantic analysis result."""
    if result is None:
        st.warning(t['no_results'])  # Make sure 'no_results' is in your translations
        return

    # Show the key concepts
    with st.expander(t['key_concepts'], expanded=True):
        concept_text = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in result['key_concepts']])
        st.write(concept_text)

    # Show the conceptual relations graph
    with st.expander(t['conceptual_relations'], expanded=True):
        st.pyplot(result['relations_graph'])
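
# Minimal usage sketch (hypothetical): `nlp_models` is expected to be a dict of loaded
# spaCy pipelines keyed by language code, and `lang_code` must match one of its keys as
# well as an entry in `translations`. The model names below are assumptions, not part of
# this project.
#
#   import spacy
#   nlp_models = {
#       'es': spacy.load('es_core_news_sm'),
#       'en': spacy.load('en_core_web_sm'),
#   }
#   display_semantic_analysis_interface(nlp_models, lang_code='es')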