|
|
|
import streamlit as st |
|
import re |
|
import io |
|
import base64 |
|
import matplotlib.pyplot as plt |
|
import pandas as pd |
|
import time |
|
from streamlit_player import st_player |
|
from spacy import displacy |
|
|
|
|
|
|
|
from .auth import authenticate_user, register_user |
|
from .database import get_student_data, store_morphosyntax_result, store_semantic_result, store_chat_history, create_admin_user, create_student_user |
|
|
|
|
|
from .morpho_analysis import generate_arc_diagram, get_repeated_words_colors, highlight_repeated_words, POS_COLORS, POS_TRANSLATIONS |
|
from .semantic_analysis import visualize_semantic_relations, perform_semantic_analysis |
|
from .discourse_analysis import compare_semantic_analysis, perform_discourse_analysis |
|
from .chatbot import initialize_chatbot, get_chatbot_response |
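

# The analysis interfaces below expect an `nlp_models` dict keyed by language code
# ('es', 'en', 'fr'), but this module never defines or receives one. The cached loader
# below is a hedged sketch: the spaCy pipeline names are assumptions and should be
# adjusted to whatever models are installed alongside the app.
import spacy


@st.cache_resource
def load_nlp_models():
    return {
        'es': spacy.load("es_core_news_lg"),
        'en': spacy.load("en_core_web_lg"),
        'fr': spacy.load("fr_core_news_lg"),
    }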
|
|
|
|
|
def initialize_session_state(): |
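    """Reset and seed st.session_state on the first run of a session."""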
|
if 'initialized' not in st.session_state: |
|
st.session_state.clear() |
|
st.session_state.initialized = True |
|
st.session_state.logged_in = False |
|
st.session_state.page = 'login' |
|
st.session_state.username = None |
|
st.session_state.role = None |
|
|
|
|
|
def main(): |
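    """Entry point: route to the login, admin, or student view based on st.session_state.page."""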
|
initialize_session_state() |
|
|
|
if st.session_state.page == 'login': |
|
login_register_page() |
|
elif st.session_state.page == 'admin': |
|
admin_page() |
|
elif st.session_state.page == 'user': |
|
user_page() |
|
|
|
print(f"Estado actual de la sesión: {st.session_state}") |
|
|
|
|
|
def login_register_page(): |
|
st.title("AIdeaText") |
|
|
|
left_column, right_column = st.columns([1, 3]) |
|
|
|
with left_column: |
|
tab1, tab2 = st.tabs(["Iniciar Sesión", "Registrarse"]) |
|
|
|
with tab1: |
|
login_form() |
|
|
|
with tab2: |
|
register_form() |
|
|
|
with right_column: |
|
display_videos_and_info() |
|
|
|
|
|
|
|
def login_form(): |
|
username = st.text_input("Correo electrónico", key="login_username") |
|
password = st.text_input("Contraseña", type="password", key="login_password") |
|
|
|
if st.button("Iniciar Sesión", key="login_button"): |
|
success, role = authenticate_user(username, password) |
|
if success: |
|
st.session_state.logged_in = True |
|
st.session_state.username = username |
|
st.session_state.role = role |
|
st.session_state.page = 'admin' if role == 'Administrador' else 'user' |
|
print(f"Inicio de sesión exitoso. Usuario: {username}, Rol: {role}") |
|
print(f"Estado de sesión después de login: {st.session_state}") |
|
st.rerun() |
|
else: |
|
st.error("Credenciales incorrectas") |
|
|
|
|
|
def admin_page(): |
|
st.title("Panel de Administración") |
|
st.write(f"Bienvenido, {st.session_state.username}") |
|
|
|
st.header("Crear Nuevo Usuario Estudiante") |
|
new_username = st.text_input("Correo electrónico del nuevo usuario", key="admin_new_username") |
|
new_password = st.text_input("Contraseña", type="password", key="admin_new_password") |
|
if st.button("Crear Usuario", key="admin_create_user"): |
|
if create_student_user(new_username, new_password): |
|
st.success(f"Usuario estudiante {new_username} creado exitosamente") |
|
else: |
|
st.error("Error al crear el usuario estudiante") |
|
|
|
|
|
|
|
|
|
def user_page(): |
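    """Student view: one tab per analysis tool, plus chat and progress tracking."""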
|
st.title("Bienvenido a AIdeaText") |
|
st.write(f"Hola, {st.session_state.username}") |
|
|
|
|
|
|
|
tabs = st.tabs(["Análisis Morfosintáctico", "Análisis Semántico", "Análisis del Discurso", "Chat", "Mi Progreso"]) |
|
|
|
with tabs[0]: |
|
display_morphosyntax_analysis_interface(nlp_models, 'es') |
|
with tabs[1]: |
|
display_semantic_analysis_interface(nlp_models, 'es') |
|
with tabs[2]: |
|
display_discourse_analysis_interface(nlp_models, 'es') |
|
with tabs[3]: |
|
display_chatbot_interface('es') |
|
with tabs[4]: |
|
display_student_progress(st.session_state.username, 'es') |
|
|
|
|
|
def display_videos_and_info(): |
|
st.header("Videos: pitch, demos, entrevistas, otros") |
|
|
|
videos = { |
|
"Intro AideaText": "https://www.youtube.com/watch?v=UA-md1VxaRc", |
|
"Pitch IFE Explora": "https://www.youtube.com/watch?v=Fqi4Di_Rj_s", |
|
"Entrevista Dr. Guillermo Ruíz": "https://www.youtube.com/watch?v=_ch8cRja3oc", |
|
"Demo versión desktop": "https://www.youtube.com/watch?v=nP6eXbog-ZY" |
|
} |
|
|
|
selected_title = st.selectbox("Selecciona un video tutorial:", list(videos.keys())) |
|
|
|
if selected_title in videos: |
|
try: |
|
st_player(videos[selected_title]) |
|
except Exception as e: |
|
st.error(f"Error al cargar el video: {str(e)}") |
|
|
|
st.markdown(""" |
|
## Novedades de la versión actual |
|
- Nueva función de análisis semántico |
|
- Soporte para múltiples idiomas |
|
- Interfaz mejorada para una mejor experiencia de usuario |
|
""") |
|
|
|
|
|
def register_form(): |
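    # Placeholder: self-service registration is not implemented yet; student accounts
    # are currently created from the admin panel via create_student_user.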
|
|
|
|
|
pass |
|
|
|
def display_chat_interface(): |
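    """Simple text-area chat view. Not wired into user_page, which uses display_chatbot_interface instead."""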
|
st.markdown("### Chat con AIdeaText") |
|
|
|
if 'chat_history' not in st.session_state: |
|
st.session_state.chat_history = [] |
|
|
|
for i, (role, text) in enumerate(st.session_state.chat_history): |
|
if role == "user": |
|
st.text_area(f"Tú:", value=text, height=50, key=f"user_message_{i}", disabled=True) |
|
else: |
|
st.text_area(f"AIdeaText:", value=text, height=50, key=f"bot_message_{i}", disabled=True) |
|
|
|
user_input = st.text_input("Escribe tu mensaje aquí:") |
|
|
|
if st.button("Enviar"): |
|
if user_input: |
|
st.session_state.chat_history.append(("user", user_input)) |
|
            # get_chatbot_response(chatbot, message, lang_code) yields response chunks
            # (see display_chatbot_interface); Spanish is assumed here because this
            # view has no lang_code parameter.
            if 'chatbot' not in st.session_state:
                st.session_state.chatbot = initialize_chatbot()
            response = "".join(get_chatbot_response(st.session_state.chatbot, user_input, 'es'))
|
st.session_state.chat_history.append(("bot", response)) |
|
            st.rerun()
|
|
|
|
|
def display_student_progress(username, lang_code='es'): |
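    """Show a student's stored results: word counts per grammatical category, arc diagrams, and network diagrams."""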
|
student_data = get_student_data(username) |
|
|
|
if student_data is None: |
|
st.warning("No se encontraron datos para este estudiante.") |
|
st.info("Intenta realizar algunos análisis de texto primero.") |
|
return |
|
|
|
st.title(f"Progreso de {username}") |
|
|
|
if student_data['entries_count'] > 0: |
|
if 'word_count' in student_data and student_data['word_count']: |
|
st.subheader("Total de palabras por categoría gramatical") |
|
|
|
df = pd.DataFrame(list(student_data['word_count'].items()), columns=['category', 'count']) |
|
            df['label'] = df['category'].map(lambda cat: POS_TRANSLATIONS[lang_code].get(cat, cat))
|
|
|
df = df.sort_values('count', ascending=False) |
|
|
|
fig, ax = plt.subplots(figsize=(12, 6)) |
|
bars = ax.bar(df['label'], df['count'], color=[POS_COLORS.get(cat, '#CCCCCC') for cat in df['category']]) |
|
|
|
ax.set_xlabel('Categoría Gramatical') |
|
ax.set_ylabel('Cantidad de Palabras') |
|
ax.set_title('Total de palabras por categoría gramatical') |
|
plt.xticks(rotation=45, ha='right') |
|
|
|
for bar in bars: |
|
height = bar.get_height() |
|
ax.text(bar.get_x() + bar.get_width()/2., height, |
|
f'{height}', |
|
ha='center', va='bottom') |
|
|
|
plt.tight_layout() |
|
|
|
buf = io.BytesIO() |
|
fig.savefig(buf, format='png') |
|
buf.seek(0) |
|
st.image(buf, use_column_width=True) |
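            plt.close(fig)  # release the figure so repeated reruns do not accumulate matplotlib state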
|
else: |
|
st.info("No hay datos de conteo de palabras disponibles.") |
|
|
|
st.header("Diagramas de Arco") |
|
with st.expander("Ver todos los Diagramas de Arco"): |
|
for i, entry in enumerate(student_data['entries']): |
|
if 'arc_diagrams' in entry and entry['arc_diagrams']: |
|
st.subheader(f"Entrada {i+1} - {entry['timestamp']}") |
|
st.write(entry['arc_diagrams'][0], unsafe_allow_html=True) |
|
|
|
st.header("Diagramas de Red") |
|
with st.expander("Ver todos los Diagramas de Red"): |
|
for i, entry in enumerate(student_data['entries']): |
|
if 'network_diagram' in entry and entry['network_diagram']: |
|
st.subheader(f"Entrada {i+1} - {entry['timestamp']}") |
|
try: |
|
image_bytes = base64.b64decode(entry['network_diagram']) |
|
st.image(image_bytes) |
|
except Exception as e: |
|
st.error(f"Error al mostrar el diagrama de red: {str(e)}") |
|
else: |
|
st.warning("No se encontraron entradas para este estudiante.") |
|
st.info("Intenta realizar algunos análisis de texto primero.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
def display_morphosyntax_analysis_interface(nlp_models, lang_code): |
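    """Morphosyntax tab: highlight repeated words, render a displaCy arc diagram per sentence, and persist the result."""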
|
translations = { |
|
'es': { |
|
'title': "AIdeaText - Análisis morfológico y sintáctico", |
|
'input_label': "Ingrese un texto para analizar (máx. 5,000 palabras):", |
|
'input_placeholder': "El objetivo de esta aplicación es que mejore sus habilidades de redacción...", |
|
'analyze_button': "Analizar texto", |
|
'repeated_words': "Palabras repetidas", |
|
'legend': "Leyenda: Categorías gramaticales", |
|
'arc_diagram': "Análisis sintáctico: Diagrama de arco", |
|
'sentence': "Oración" |
|
}, |
|
'en': { |
|
'title': "AIdeaText - Morphological and Syntactic Analysis", |
|
'input_label': "Enter a text to analyze (max 5,000 words):", |
|
'input_placeholder': "The goal of this app is for you to improve your writing skills...", |
|
'analyze_button': "Analyze text", |
|
'repeated_words': "Repeated words", |
|
'legend': "Legend: Grammatical categories", |
|
'arc_diagram': "Syntactic analysis: Arc diagram", |
|
'sentence': "Sentence" |
|
}, |
|
'fr': { |
|
'title': "AIdeaText - Analyse morphologique et syntaxique", |
|
'input_label': "Entrez un texte à analyser (max 5 000 mots) :", |
|
'input_placeholder': "Le but de cette application est d'améliorer vos compétences en rédaction...", |
|
'analyze_button': "Analyser le texte", |
|
'repeated_words': "Mots répétés", |
|
'legend': "Légende : Catégories grammaticales", |
|
'arc_diagram': "Analyse syntaxique : Diagramme en arc", |
|
'sentence': "Phrase" |
|
} |
|
} |
|
|
|
t = translations[lang_code] |
|
|
|
input_key = f"morphosyntax_input_{lang_code}" |
|
|
|
|
|
if input_key not in st.session_state: |
|
st.session_state[input_key] = "" |
|
|
|
|
|
def update_input(): |
|
st.session_state[input_key] = st.session_state[f"text_area_{lang_code}"] |
|
|
|
sentence_input = st.text_area( |
|
t['input_label'], |
|
height=150, |
|
placeholder=t['input_placeholder'], |
|
value=st.session_state[input_key], |
|
key=f"text_area_{lang_code}", |
|
on_change=update_input |
|
) |
|
|
|
if st.button(t['analyze_button'], key=f"analyze_button_{lang_code}"): |
|
current_input = st.session_state[input_key] |
|
if current_input: |
|
doc = nlp_models[lang_code](current_input) |
|
|
|
word_colors = get_repeated_words_colors(doc) |
|
|
|
with st.expander(t['repeated_words'], expanded=True): |
|
highlighted_text = highlight_repeated_words(doc, word_colors) |
|
st.markdown(highlighted_text, unsafe_allow_html=True) |
|
|
|
st.markdown(f"##### {t['legend']}") |
|
legend_html = "<div style='display: flex; flex-wrap: wrap;'>" |
|
for pos, color in POS_COLORS.items(): |
|
if pos in POS_TRANSLATIONS[lang_code]: |
|
legend_html += f"<div style='margin-right: 10px;'><span style='background-color: {color}; padding: 2px 5px;'>{POS_TRANSLATIONS[lang_code][pos]}</span></div>" |
|
legend_html += "</div>" |
|
st.markdown(legend_html, unsafe_allow_html=True) |
|
|
|
with st.expander(t['arc_diagram'], expanded=True): |
|
sentences = list(doc.sents) |
|
arc_diagrams = [] |
|
for i, sent in enumerate(sentences): |
|
st.subheader(f"{t['sentence']} {i+1}") |
|
html = displacy.render(sent, style="dep", options={"distance": 100}) |
|
html = html.replace('height="375"', 'height="200"') |
|
html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) |
|
html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"', lambda m: f'<g transform="translate({m.group(1)},50)"', html) |
|
st.write(html, unsafe_allow_html=True) |
|
arc_diagrams.append(html) |
|
|
|
if store_morphosyntax_result( |
|
st.session_state.username, |
|
current_input, |
|
word_colors, |
|
arc_diagrams, |
|
): |
|
st.success("Análisis guardado correctamente.") |
|
else: |
|
st.error("Hubo un problema al guardar el análisis. Por favor, inténtelo de nuevo.") |
|
else: |
|
st.warning("Por favor, ingrese un texto para analizar.") |
|
|
|
|
|
def display_semantic_analysis_interface(nlp_models, lang_code): |
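    """Semantic analysis tab: analyze an uploaded .txt file and plot its semantic relation graph."""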
|
translations = { |
|
'es': { |
|
'title': "AIdeaText - Análisis semántico", |
|
'file_uploader': "Cargar archivo de texto", |
|
'analyze_button': "Analizar texto", |
|
'semantic_relations': "Relaciones Semánticas Relevantes", |
|
}, |
|
'en': { |
|
'title': "AIdeaText - Semantic Analysis", |
|
'file_uploader': "Upload text file", |
|
'analyze_button': "Analyze text", |
|
'semantic_relations': "Relevant Semantic Relations", |
|
}, |
|
'fr': { |
|
'title': "AIdeaText - Analyse sémantique", |
|
'file_uploader': "Télécharger le fichier texte", |
|
'analyze_button': "Analyser le texte", |
|
'semantic_relations': "Relations Sémantiques Pertinentes", |
|
} |
|
} |
|
|
|
t = translations[lang_code] |
|
|
|
st.header(t['title']) |
|
|
|
uploaded_file = st.file_uploader(t['file_uploader'], type=['txt']) |
|
|
|
if uploaded_file is not None: |
|
text_content = uploaded_file.getvalue().decode('utf-8') |
|
|
|
if st.button(t['analyze_button']): |
|
relations_graph = perform_semantic_analysis(text_content, nlp_models[lang_code], lang_code) |
|
|
|
with st.expander(t['semantic_relations'], expanded=True): |
|
st.pyplot(relations_graph) |
|
|
|
|
|
|
|
def display_discourse_analysis_interface(nlp_models, lang_code): |
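    """Discourse analysis tab: compare the semantic relation graphs of two uploaded texts side by side."""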
|
translations = { |
|
'es': { |
|
'title': "AIdeaText - Análisis del discurso", |
|
'file_uploader1': "Cargar archivo de texto 1 (Patrón)", |
|
'file_uploader2': "Cargar archivo de texto 2 (Comparación)", |
|
'analyze_button': "Analizar textos", |
|
'comparison': "Comparación de Relaciones Semánticas", |
|
}, |
|
'en': { |
|
'title': "AIdeaText - Discourse Analysis", |
|
'file_uploader1': "Upload text file 1 (Pattern)", |
|
'file_uploader2': "Upload text file 2 (Comparison)", |
|
'analyze_button': "Analyze texts", |
|
'comparison': "Comparison of Semantic Relations", |
|
}, |
|
'fr': { |
|
'title': "AIdeaText - Analyse du discours", |
|
'file_uploader1': "Télécharger le fichier texte 1 (Modèle)", |
|
'file_uploader2': "Télécharger le fichier texte 2 (Comparaison)", |
|
'analyze_button': "Analyser les textes", |
|
'comparison': "Comparaison des Relations Sémantiques", |
|
} |
|
} |
|
|
|
t = translations[lang_code] |
|
|
|
st.header(t['title']) |
|
|
|
col1, col2 = st.columns(2) |
|
|
|
with col1: |
|
uploaded_file1 = st.file_uploader(t['file_uploader1'], type=['txt']) |
|
|
|
with col2: |
|
uploaded_file2 = st.file_uploader(t['file_uploader2'], type=['txt']) |
|
|
|
if uploaded_file1 is not None and uploaded_file2 is not None: |
|
text_content1 = uploaded_file1.getvalue().decode('utf-8') |
|
text_content2 = uploaded_file2.getvalue().decode('utf-8') |
|
|
|
if st.button(t['analyze_button']): |
|
graph1, graph2 = perform_discourse_analysis(text_content1, text_content2, nlp_models[lang_code], lang_code) |
|
|
|
st.subheader(t['comparison']) |
|
|
|
col1, col2 = st.columns(2) |
|
|
|
with col1: |
|
st.pyplot(graph1) |
|
|
|
with col2: |
|
st.pyplot(graph2) |
|
|
|
|
|
|
|
def display_chatbot_interface(lang_code): |
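    """Chatbot tab: streaming conversation; messages live in session state and are persisted via store_chat_history."""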
|
translations = { |
|
'es': { |
|
'title': "Expertos en Vacaciones", |
|
'input_placeholder': "Escribe tu mensaje aquí...", |
|
'initial_message': "¡Hola! ¿Cómo podemos ayudarte?" |
|
}, |
|
'en': { |
|
'title': "Vacation Experts", |
|
'input_placeholder': "Type your message here...", |
|
'initial_message': "Hi! How can we help you?" |
|
}, |
|
'fr': { |
|
'title': "Experts en Vacances", |
|
'input_placeholder': "Écrivez votre message ici...", |
|
'initial_message': "Bonjour! Comment pouvons-nous vous aider?" |
|
} |
|
} |
|
t = translations[lang_code] |
|
st.title(t['title']) |
|
|
|
if 'chatbot' not in st.session_state: |
|
st.session_state.chatbot = initialize_chatbot() |
|
if 'messages' not in st.session_state: |
|
st.session_state.messages = [{"role": "assistant", "content": t['initial_message']}] |
|
|
|
|
|
chat_container = st.container() |
|
|
|
|
|
with chat_container: |
|
for message in st.session_state.messages: |
|
with st.chat_message(message["role"]): |
|
st.markdown(message["content"]) |
|
|
|
|
|
user_input = st.chat_input(t['input_placeholder']) |
|
|
|
if user_input: |
|
|
|
st.session_state.messages.append({"role": "user", "content": user_input}) |
|
|
|
|
|
with chat_container: |
|
with st.chat_message("user"): |
|
st.markdown(user_input) |
|
|
|
|
|
with chat_container: |
|
with st.chat_message("assistant"): |
|
message_placeholder = st.empty() |
|
full_response = "" |
|
|
|
for chunk in get_chatbot_response(st.session_state.chatbot, user_input, lang_code): |
|
full_response += chunk |
|
message_placeholder.markdown(full_response + "▌") |
|
message_placeholder.markdown(full_response) |
|
|
|
|
|
st.session_state.messages.append({"role": "assistant", "content": full_response}) |
|
|
|
|
|
store_chat_history(st.session_state.username, st.session_state.messages) |
|
|
|
|
|
    # Best-effort auto-scroll to the newest message. Note that <script> passed through
    # st.markdown is generally not executed by Streamlit, so this may be a no-op.
    st.markdown('<script>window.scrollTo(0,document.body.scrollHeight);</script>', unsafe_allow_html=True)
|
|
|
|
|
if __name__ == "__main__": |
|
main() |