diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..19cf2204d9d615b43bf98fae660395a8cdebf51f --- /dev/null +++ b/app.py @@ -0,0 +1,150 @@ +#app.py de v3 +#app.py +import logging +import streamlit as st +import sys +import os +from dotenv import load_dotenv +from datetime import datetime + +def setup_logging(): + log_dir = 'logs' + if not os.path.exists(log_dir): + os.makedirs(log_dir) + current_time = datetime.now().strftime("%Y%m%d_%H%M%S") + log_filename = f'{log_dir}/app_log_{current_time}.txt' + logging.basicConfig( + level=logging.DEBUG, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + filename=log_filename, + filemode='w' + ) + console = logging.StreamHandler() + console.setLevel(logging.INFO) + formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') + console.setFormatter(formatter) + logging.getLogger('').addHandler(console) + logging.info(f"Logging iniciado. Archivo de log: {log_filename}") + +setup_logging() +load_dotenv() + +st.set_page_config(page_title="AIdeaText", layout="wide", page_icon="random") + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +#########IMPORTACIONES LOCALES####################################### +from translations import get_translations + +from session_state import initialize_session_state + +from modules.ui.ui import main as ui_main + +from modules.utils.spacy_utils import load_spacy_models + +from modules.morphosyntax.morphosyntax_interface import ( + display_morphosyntax_interface +) + +###Importaciones de la base de datos### +from modules.database.database_init import ( + initialize_database_connections +) + +from modules.database.sql_db import ( + create_student_user, + get_student_user, + update_student_user, + delete_student_user, + store_application_request, + store_student_feedback +) + + +from modules.database.mongo_db import ( + get_collection, + insert_document, + find_documents, + update_document, + delete_document +) + +from modules.database.morphosintax_mongo_db import ( + store_student_morphosyntax_result, + get_student_morphosyntax_analysis +) + +from modules.database.chat_db import ( + store_chat_history, + get_chat_history +) + +from modules.studentact.student_activities_v2 import ( + display_student_progress +) + +from modules.auth.auth import ( + authenticate_student, + register_student, + update_student_info, + delete_student +) + +from modules.admin.admin_ui import admin_page + +from modules.chatbot.chatbot import ( + initialize_chatbot, + process_chat_input +) + +print("Configurando página") +st.cache_data.clear() +st.cache_resource.clear() + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +@st.cache_resource +def initialize_nlp_models(): + logger.info("Cargando modelos de spaCy") + models = load_spacy_models() + logger.info("Modelos de spaCy cargados exitosamente") + return models + +def app_main(): + try: + logger.info("Entrando en app_main()") + + # Inicializar el estado de la sesión + initialize_session_state() + + # Inicializar conexiones a bases de datos si no se ha hecho + if 'db_initialized' not in st.session_state: + st.session_state.db_initialized = initialize_database_connections() + + # Cargar modelos NLP si no se ha hecho + if 'nlp_models' not in st.session_state: + logger.info("Inicializando modelos NLP en la sesión") + st.session_state.nlp_models = initialize_nlp_models() + logger.info("Modelos NLP inicializados y almacenados en la sesión") + + # Configurar la página 
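# Note: initialize_nlp_models() is wrapped in @st.cache_resource, so the spaCy
# pipelines are created once per process and shared across sessions and reruns;
# keeping a reference in st.session_state avoids repeated cache lookups but does
# not load the models a second time.
# Note: the module-level logging.basicConfig(level=logging.INFO) earlier in this
# file is effectively a no-op, because setup_logging() has already attached
# handlers to the root logger and basicConfig() does nothing once handlers exist.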
inicial si no está configurada + if 'page' not in st.session_state: + st.session_state.page = 'login' + + logger.info(f"Página actual: {st.session_state.page}") + logger.info(f"Rol del usuario: {st.session_state.role}") + + # Dirigir el flujo a la interfaz de usuario principal + logger.info(f"Llamando a ui_main() desde app_main()") + ui_main() + + except Exception as e: + logger.error(f"Error en app_main: {str(e)}", exc_info=True) + st.error("Se ha producido un error en la aplicación. Por favor, inténtelo de nuevo más tarde.") + if st.button("Reiniciar aplicación"): + st.rerun() + +if __name__ == "__main__": + print("Llamando a app_main()") + app_main() \ No newline at end of file diff --git a/assets/img/AIdeaTextCard.jpg b/assets/img/AIdeaTextCard.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74491084bc16ba88426e5c6cbf84bdd83f6e5f55 Binary files /dev/null and b/assets/img/AIdeaTextCard.jpg differ diff --git a/assets/img/Mesa de trabajo 1png 0.3.png b/assets/img/Mesa de trabajo 1png 0.3.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1e518d7dd4b7f7f8c9ee572a471d9e84e2a285 Binary files /dev/null and b/assets/img/Mesa de trabajo 1png 0.3.png differ diff --git a/assets/img/logo_92x92.jpg b/assets/img/logo_92x92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ceb744511b50a4217a7aacacd8612111caa750b9 Binary files /dev/null and b/assets/img/logo_92x92.jpg differ diff --git a/assets/img/logo_92x92.png b/assets/img/logo_92x92.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1e518d7dd4b7f7f8c9ee572a471d9e84e2a285 Binary files /dev/null and b/assets/img/logo_92x92.png differ diff --git a/assets/img/text.txt b/assets/img/text.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/23-7-2024_auth.py b/modules/23-7-2024_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..1919f0375c28484a5eeaa70533135e70e96357c6 --- /dev/null +++ b/modules/23-7-2024_auth.py @@ -0,0 +1,135 @@ +### auth.py +import os +from azure.cosmos import CosmosClient, exceptions +import bcrypt +import base64 + +################################################################################################################ +def clean_and_validate_key(key): + key = key.strip() + while len(key) % 4 != 0: + key += '=' + try: + base64.b64decode(key) + return key + except: + raise ValueError("La clave proporcionada no es válida") + +# Azure Cosmos DB configuration +endpoint = os.environ.get("COSMOS_ENDPOINT") +key = os.environ.get("COSMOS_KEY") + +if not endpoint or not key: + raise ValueError("Las variables de entorno COSMOS_ENDPOINT y COSMOS_KEY deben estar configuradas") + +key = clean_and_validate_key(key) + +try: + client = CosmosClient(endpoint, key) + database = client.get_database_client("user_database") + container = database.get_container_client("users") + # Prueba de conexión + database_list = list(client.list_databases()) + print(f"Conexión exitosa. 
Bases de datos encontradas: {len(database_list)}") +except Exception as e: + print(f"Error al conectar con Cosmos DB: {str(e)}") + raise + +#############################################################################################################3 +def hash_password(password): + """Hash a password for storing.""" + return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8') + +################################################################################################################ +def verify_password(stored_password, provided_password): + """Verify a stored password against one provided by user""" + return bcrypt.checkpw(provided_password.encode('utf-8'), stored_password.encode('utf-8')) + +################################################################################################################ +def register_user(username, password, additional_info=None): + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + existing_user = list(container.query_items(query=query, enable_cross_partition_query=True)) + + if existing_user: + return False # User already exists + + new_user = { + 'id': username, + 'password': hash_password(password), + 'role': 'Estudiante', + 'additional_info': additional_info or {} + } + + new_user['partitionKey'] = username + + container.create_item(body=new_user) + return True + except exceptions.CosmosHttpResponseError as e: + print(f"Error al registrar usuario: {str(e)}") + return False + + +################################################################################################################ +def authenticate_user(username, password): + """Authenticate a user.""" + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + results = list(container.query_items(query=query, partition_key=username)) + + if results: + stored_user = results[0] + if verify_password(stored_user['password'], password): + return True + except exceptions.CosmosHttpResponseError: + pass + + return False + + +################################################################################################################ +def get_user_role(username): + """Get the role of a user.""" + try: + query = f"SELECT c.role FROM c WHERE c.id = '{username}'" + results = list(container.query_items(query=query, partition_key=username)) + + if results: + return results[0]['role'] + except exceptions.CosmosHttpResponseError: + pass + + return None + +################################################################################################################ +def update_user_info(username, new_info): + """Update user information.""" + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + results = list(container.query_items(query=query, partition_key=username)) + + if results: + user = results[0] + user['additional_info'].update(new_info) + container.upsert_item(user, partition_key=username) + return True + except exceptions.CosmosHttpResponseError: + pass + + return False + +################################################################################################################ +def delete_user(username): + """Delete a user.""" + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + results = list(container.query_items(query=query, partition_key=username)) + + if results: + user = results[0] + container.delete_item(item=user['id'], partition_key=username) + return True + except exceptions.CosmosHttpResponseError: + pass + + return False \ No newline at end of file diff --git a/modules/23-7-2024_ui.py b/modules/23-7-2024_ui.py 
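A note on the query pattern used throughout 23-7-2024_auth.py above: building the
SQL text with f-strings (f"SELECT * FROM c WHERE c.id = '{username}'") lets a
crafted username change the query itself. The azure-cosmos SDK accepts
parameterized queries; a minimal sketch of the same lookup, reusing this module's
container client:

    # Parameterized version of the per-user lookup from auth.py (sketch).
    def find_user(username):
        query = "SELECT * FROM c WHERE c.id = @username"
        parameters = [{"name": "@username", "value": username}]
        items = list(container.query_items(
            query=query,
            parameters=parameters,
            enable_cross_partition_query=True,
        ))
        return items[0] if items else None

The same @parameter form applies to the role, update, and delete helpers above.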
new file mode 100644 index 0000000000000000000000000000000000000000..61b9cfa1929613b94deaf41659aab855877e772a --- /dev/null +++ b/modules/23-7-2024_ui.py @@ -0,0 +1,344 @@ +# modules/ui.py +# Importaciones estandar de python +import io +import streamlit as st +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import squarify +import pandas as pd +from datetime import datetime +import base64 +from spacy import displacy +import re +from .morpho_analysis import POS_COLORS, POS_TRANSLATIONS # Asegúrate de que esta importación esté presente +print("POS_COLORS:", POS_COLORS) +print("POS_TRANSLATIONS:", POS_TRANSLATIONS) + +# Importaciones locales +from .auth import authenticate_user, register_user, get_user_role +from .database import get_student_data, store_analysis_result +from .morpho_analysis import get_repeated_words_colors, highlight_repeated_words, POS_COLORS, POS_TRANSLATIONS +from .syntax_analysis import visualize_syntax + +######################################################################### +# Define colors for grammatical categories +POS_COLORS = { + 'ADJ': '#FFA07A', # Light Salmon + 'ADP': '#98FB98', # Pale Green + 'ADV': '#87CEFA', # Light Sky Blue + 'AUX': '#DDA0DD', # Plum + 'CCONJ': '#F0E68C', # Khaki + 'DET': '#FFB6C1', # Light Pink + 'INTJ': '#FF6347', # Tomato + 'NOUN': '#90EE90', # Light Green + 'NUM': '#FAFAD2', # Light Goldenrod Yellow + 'PART': '#D3D3D3', # Light Gray + 'PRON': '#FFA500', # Orange + 'PROPN': '#20B2AA', # Light Sea Green + 'SCONJ': '#DEB887', # Burlywood + 'SYM': '#7B68EE', # Medium Slate Blue + 'VERB': '#FF69B4', # Hot Pink + 'X': '#A9A9A9', # Dark Gray +} + +POS_TRANSLATIONS = { + 'es': { + 'ADJ': 'Adjetivo', + 'ADP': 'Adposición', + 'ADV': 'Adverbio', + 'AUX': 'Auxiliar', + 'CCONJ': 'Conjunción Coordinante', + 'DET': 'Determinante', + 'INTJ': 'Interjección', + 'NOUN': 'Sustantivo', + 'NUM': 'Número', + 'PART': 'Partícula', + 'PRON': 'Pronombre', + 'PROPN': 'Nombre Propio', + 'SCONJ': 'Conjunción Subordinante', + 'SYM': 'Símbolo', + 'VERB': 'Verbo', + 'X': 'Otro', + }, + 'en': { + 'ADJ': 'Adjective', + 'ADP': 'Adposition', + 'ADV': 'Adverb', + 'AUX': 'Auxiliary', + 'CCONJ': 'Coordinating Conjunction', + 'DET': 'Determiner', + 'INTJ': 'Interjection', + 'NOUN': 'Noun', + 'NUM': 'Number', + 'PART': 'Particle', + 'PRON': 'Pronoun', + 'PROPN': 'Proper Noun', + 'SCONJ': 'Subordinating Conjunction', + 'SYM': 'Symbol', + 'VERB': 'Verb', + 'X': 'Other', + }, + 'fr': { + 'ADJ': 'Adjectif', + 'ADP': 'Adposition', + 'ADV': 'Adverbe', + 'AUX': 'Auxiliaire', + 'CCONJ': 'Conjonction de Coordination', + 'DET': 'Déterminant', + 'INTJ': 'Interjection', + 'NOUN': 'Nom', + 'NUM': 'Nombre', + 'PART': 'Particule', + 'PRON': 'Pronom', + 'PROPN': 'Nom Propre', + 'SCONJ': 'Conjonction de Subordination', + 'SYM': 'Symbole', + 'VERB': 'Verbe', + 'X': 'Autre', + } +} + +########################################################################## +def login_page(): + st.title("Iniciar Sesión") + username = st.text_input("Usuario") + password = st.text_input("Contraseña", type='password') + if st.button("Iniciar Sesión"): + if authenticate_user(username, password): + st.success(f"Bienvenido, {username}!") + st.session_state.logged_in = True + st.session_state.username = username + st.session_state.role = get_user_role(username) + st.experimental_rerun() + else: + st.error("Usuario o contraseña incorrectos") + +########################################################################## +def register_page(): + st.title("Registrarse") + new_username = 
st.text_input("Nuevo Usuario") + new_password = st.text_input("Nueva Contraseña", type='password') + + additional_info = {} + additional_info['carrera'] = st.text_input("Carrera") + + if st.button("Registrarse"): + if register_user(new_username, new_password, additional_info): + st.success("Registro exitoso. Por favor, inicia sesión.") + else: + st.error("El usuario ya existe o ocurrió un error durante el registro") + +########################################################################## +def get_chatbot_response(input_text): + # Esta función debe ser implementada o importada de otro módulo + # Por ahora, retornamos un mensaje genérico + return "Lo siento, el chatbot no está disponible en este momento." + +########################################################################## +def display_chat_interface(): + st.markdown("### Chat con AIdeaText") + + if 'chat_history' not in st.session_state: + st.session_state.chat_history = [] + + for i, (role, text) in enumerate(st.session_state.chat_history): + if role == "user": + st.text_area(f"Tú:", value=text, height=50, key=f"user_message_{i}", disabled=True) + else: + st.text_area(f"AIdeaText:", value=text, height=50, key=f"bot_message_{i}", disabled=True) + + user_input = st.text_input("Escribe tu mensaje aquí:") + + if st.button("Enviar"): + if user_input: + st.session_state.chat_history.append(("user", user_input)) + response = get_chatbot_response(user_input) + st.session_state.chat_history.append(("bot", response)) + st.experimental_rerun() + +########################################################################## + +def display_student_progress(username, lang_code='es'): + print("lang_code:", lang_code) + student_data = get_student_data(username) + + if student_data is None: + st.warning("No se encontraron datos para este estudiante.") + st.info("Intenta realizar algunos análisis de texto primero.") + return + + st.title(f"Progreso de {username}") + + if student_data['entries_count'] > 0: + if 'word_count' in student_data and student_data['word_count']: + st.subheader("Total de palabras por categoría gramatical") + + df = pd.DataFrame(list(student_data['word_count'].items()), columns=['category', 'count']) + df['label'] = df.apply(lambda x: f"{POS_TRANSLATIONS[lang_code].get(x['category'], x['category'])}", axis=1) + + # Ordenar el DataFrame por conteo de palabras, de mayor a menor + df = df.sort_values('count', ascending=False) + + fig, ax = plt.subplots(figsize=(12, 6)) + bars = ax.bar(df['label'], df['count'], color=[POS_COLORS.get(cat, '#CCCCCC') for cat in df['category']]) + + ax.set_xlabel('Categoría Gramatical') + ax.set_ylabel('Cantidad de Palabras') + ax.set_title('Total de palabras por categoría gramatical') + plt.xticks(rotation=45, ha='right') + + # Añadir etiquetas de valor en las barras + for bar in bars: + height = bar.get_height() + ax.text(bar.get_x() + bar.get_width()/2., height, + f'{height}', + ha='center', va='bottom') + + plt.tight_layout() + + buf = io.BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + st.image(buf, use_column_width=True) + else: + st.info("No hay datos de conteo de palabras disponibles.") + + # Diagramas de Arco (consolidados) + st.header("Diagramas de Arco") + with st.expander("Ver todos los Diagramas de Arco"): + for i, entry in enumerate(student_data['entries']): + if 'arc_diagrams' in entry and entry['arc_diagrams']: + st.subheader(f"Entrada {i+1} - {entry['timestamp']}") + st.write(entry['arc_diagrams'][0], unsafe_allow_html=True) + + # Diagramas de Red (consolidados) + 
st.header("Diagramas de Red") + with st.expander("Ver todos los Diagramas de Red"): + for i, entry in enumerate(student_data['entries']): + if 'network_diagram' in entry and entry['network_diagram']: + st.subheader(f"Entrada {i+1} - {entry['timestamp']}") + try: + # Decodificar la imagen base64 + image_bytes = base64.b64decode(entry['network_diagram']) + st.image(image_bytes) + except Exception as e: + st.error(f"Error al mostrar el diagrama de red: {str(e)}") + else: + st.warning("No se encontraron entradas para este estudiante.") + st.info("Intenta realizar algunos análisis de texto primero.") + +##############################################################Mostrar entradas recientes###################################################################### + #st.header("Entradas Recientes") + #for i, entry in enumerate(student_data['entries'][:5]): # Mostrar las 5 entradas más recientes + #with st.expander(f"Entrada {i+1} - {entry['timestamp']}"): + #st.write(entry['text']) + #else: + #st.warning("No se encontraron entradas para este estudiante.") + #st.info("Intenta realizar algunos análisis de texto primero.") + +########################################################################## +def display_text_analysis_interface(nlp_models, lang_code): + translations = { + 'es': { + 'title': "AIdeaText - Análisis morfológico y sintáctico", + 'input_label': "Ingrese un texto para analizar (máx. 5,000 palabras):", + 'input_placeholder': "El objetivo de esta aplicación es que mejore sus habilidades de redacción. Para ello, después de ingresar su texto y presionar el botón obtendrá tres vistas horizontales. La primera, le indicará las palabras que se repiten por categoría gramátical; la segunda, un diagrama de arco le indicara las conexiones sintácticas en cada oración; y la tercera, es un grafo en el cual visualizara la configuración de su texto.", + 'analyze_button': "Analizar texto", + 'repeated_words': "Palabras repetidas", + 'legend': "Leyenda: Categorías gramaticales", + 'arc_diagram': "Análisis sintáctico: Diagrama de arco", + 'network_diagram': "Análisis sintáctico: Diagrama de red", + 'sentence': "Oración" + }, + 'en': { + 'title': "AIdeaText - Morphological and Syntactic Analysis", + 'input_label': "Enter a text to analyze (max 5,000 words):", + 'input_placeholder': "The goal of this app is for you to improve your writing skills. To do this, after entering your text and pressing the button you will get three horizontal views. The first will indicate the words that are repeated by grammatical category; second, an arc diagram will indicate the syntactic connections in each sentence; and the third is a graph in which you will visualize the configuration of your text.", + 'analyze_button': "Analyze text", + 'repeated_words': "Repeated words", + 'legend': "Legend: Grammatical categories", + 'arc_diagram': "Syntactic analysis: Arc diagram", + 'network_diagram': "Syntactic analysis: Network diagram", + 'sentence': "Sentence" + }, + 'fr': { + 'title': "AIdeaText - Analyse morphologique et syntaxique", + 'input_label': "Entrez un texte à analyser (max 5 000 mots) :", + 'input_placeholder': "Le but de cette application est d'améliorer vos compétences en rédaction. Pour ce faire, après avoir saisi votre texte et appuyé sur le bouton vous obtiendrez trois vues horizontales. 
Le premier indiquera les mots répétés par catégorie grammaticale; deuxièmement, un diagramme en arcs indiquera les connexions syntaxiques dans chaque phrase; et le troisième est un graphique dans lequel vous visualiserez la configuration de votre texte.", + 'analyze_button': "Analyser le texte", + 'repeated_words': "Mots répétés", + 'legend': "Légende : Catégories grammaticales", + 'arc_diagram': "Analyse syntaxique : Diagramme en arc", + 'network_diagram': "Analyse syntaxique : Diagramme de réseau", + 'sentence': "Phrase" + } + } + + t = translations[lang_code] + + if 'input_text' not in st.session_state: + st.session_state.input_text = "" + + # Añadimos una clave única basada en el idioma seleccionado + sentence_input = st.text_area( + t['input_label'], + height=150, + placeholder=t['input_placeholder'], + value=st.session_state.input_text, + key=f"text_input_{lang_code}" # Clave única basada en el idioma + ) + st.session_state.input_text = sentence_input + +# sentence_input = st.text_area(t['input_label'], height=150, placeholder=t['input_placeholder'], value=st.session_state.input_text) +# st.session_state.input_text = sentence_input + + if st.button(t['analyze_button'], key=f"analyze_button_{lang_code}"): + if sentence_input: + doc = nlp_models[lang_code](sentence_input) + + with st.expander(t['repeated_words'], expanded=True): + word_colors = get_repeated_words_colors(doc) + highlighted_text = highlight_repeated_words(doc, word_colors) + st.markdown(highlighted_text, unsafe_allow_html=True) + + st.markdown(f"##### {t['legend']}") + legend_html = "
" + for pos, color in POS_COLORS.items(): + if pos in POS_TRANSLATIONS: + legend_html += f"
{POS_TRANSLATIONS[pos]}
" + legend_html += "
" + st.markdown(legend_html, unsafe_allow_html=True) + + with st.expander(t['arc_diagram'], expanded=True): + sentences = list(doc.sents) + arc_diagrams = [] + for i, sent in enumerate(sentences): + st.subheader(f"{t['sentence']} {i+1}") + html = displacy.render(sent, style="dep", options={"distance": 100}) + html = html.replace('height="375"', 'height="200"') + html = re.sub(r']*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) + html = re.sub(r']*transform="translate\((\d+),(\d+)\)"', lambda m: f' {item['input']} : {item['output']} " + encoding = self.tokenizer(input_text, truncation=True, padding='max_length', max_length=self.max_length, return_tensors="pt") + return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze() + +class MultilingualChatbot: + def __init__(self): + self.models = { + 'en': GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium"), + 'es': GPT2LMHeadModel.from_pretrained("DeepESP/gpt2-spanish"), + 'fr': GPT2LMHeadModel.from_pretrained("asi/gpt-fr-cased-small") + } + self.tokenizers = { + 'en': GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium"), + 'es': GPT2Tokenizer.from_pretrained("DeepESP/gpt2-spanish"), + 'fr': GPT2Tokenizer.from_pretrained("asi/gpt-fr-cased-small") + } + for tokenizer in self.tokenizers.values(): + tokenizer.pad_token = tokenizer.eos_token + tokenizer.add_special_tokens({ + "bos_token": "", + "eos_token": "" + }) + tokenizer.add_tokens([":"]) + + for model in self.models.values(): + model.resize_token_embeddings(len(self.tokenizers['en'])) # Assuming all tokenizers have the same vocabulary size + + self.device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu" + for model in self.models.values(): + model.to(self.device) + + def train(self, lang, data_file, epochs=5, batch_size=32, learning_rate=1e-4): + model = self.models[lang] + tokenizer = self.tokenizers[lang] + + chat_data = MultilingualChatData(data_file, tokenizer) + data_loader = DataLoader(chat_data, batch_size=batch_size, shuffle=True) + + optimizer = Adam(model.parameters(), lr=learning_rate) + + model.train() + for epoch in range(epochs): + total_loss = 0 + for batch in tqdm.tqdm(data_loader, desc=f"Epoch {epoch+1}/{epochs}"): + input_ids, attention_mask = [b.to(self.device) for b in batch] + + optimizer.zero_grad() + outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) + loss = outputs.loss + loss.backward() + optimizer.step() + + total_loss += loss.item() + + print(f"Epoch {epoch+1}/{epochs}, Loss: {total_loss/len(data_loader):.4f}") + + torch.save(model.state_dict(), f"model_state_{lang}.pt") + + def generate_response(self, prompt, src_lang): + model = self.models.get(src_lang, self.models['en']) + tokenizer = self.tokenizers.get(src_lang, self.tokenizers['en']) + + input_text = f" {prompt} : " + input_ids = tokenizer.encode(input_text, return_tensors='pt').to(self.device) + + attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=self.device) + + output = model.generate( + input_ids, + attention_mask=attention_mask, + max_length=1000, + pad_token_id=tokenizer.eos_token_id, + no_repeat_ngram_size=3, + do_sample=True, + top_k=50, + top_p=0.95, + temperature=0.7, + num_return_sequences=1, + length_penalty=1.0, + repetition_penalty=1.2 + ) + + decoded_output = tokenizer.decode(output[0], skip_special_tokens=True) + return decoded_output.split(":")[-1].strip() + +def initialize_chatbot(): + return MultilingualChatbot() + +def 
get_chatbot_response(chatbot, prompt, src_lang): + return chatbot.generate_response(prompt, src_lang) + +# Ejemplo de uso +if __name__ == "__main__": + chatbot = initialize_chatbot() + + # Entrenar el modelo en español (asumiendo que tienes un archivo de datos en español) + chatbot.train('es', './spanish_chat_data.json', epochs=3) + + # Generar respuestas + print(get_chatbot_response(chatbot, "Hola, ¿cómo estás?", 'es')) + print(get_chatbot_response(chatbot, "Hello, how are you?", 'en')) + print(get_chatbot_response(chatbot, "Bonjour, comment allez-vous?", 'fr')) \ No newline at end of file diff --git a/modules/chatbot/txt.txt b/modules/chatbot/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/database/__init__.py b/modules/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/database/__pycache__/__init__.cpython-311.pyc b/modules/database/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a266f255fb26d3b8986ad11380482db97782fef5 Binary files /dev/null and b/modules/database/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/database/__pycache__/chat_db.cpython-311.pyc b/modules/database/__pycache__/chat_db.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..413b6362739b3e87d44aeb8e62de940cf479251d Binary files /dev/null and b/modules/database/__pycache__/chat_db.cpython-311.pyc differ diff --git a/modules/database/__pycache__/database.cpython-311.pyc b/modules/database/__pycache__/database.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e5def232078ecafa2282ef270d37dad5354fb59 Binary files /dev/null and b/modules/database/__pycache__/database.cpython-311.pyc differ diff --git a/modules/database/__pycache__/database_init.cpython-311.pyc b/modules/database/__pycache__/database_init.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3550252ef5e3d35d443b6bf7890c9ce889291acd Binary files /dev/null and b/modules/database/__pycache__/database_init.cpython-311.pyc differ diff --git a/modules/database/__pycache__/database_oldFromV2.cpython-311.pyc b/modules/database/__pycache__/database_oldFromV2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e69d036d220327e2b5e732414cde1f1683c4eb3 Binary files /dev/null and b/modules/database/__pycache__/database_oldFromV2.cpython-311.pyc differ diff --git a/modules/database/__pycache__/mongo_db.cpython-311.pyc b/modules/database/__pycache__/mongo_db.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9441dcab4ec067bdaa9a84c04b10f4721e8de110 Binary files /dev/null and b/modules/database/__pycache__/mongo_db.cpython-311.pyc differ diff --git a/modules/database/__pycache__/morphosintax_db.cpython-311.pyc b/modules/database/__pycache__/morphosintax_db.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..872f610f9e83a0c5908c59a59219ca6790c19be1 Binary files /dev/null and b/modules/database/__pycache__/morphosintax_db.cpython-311.pyc differ diff --git a/modules/database/__pycache__/morphosintax_mongo_db.cpython-311.pyc b/modules/database/__pycache__/morphosintax_mongo_db.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af12c9eeb005d74337f9d11c2c3cd088923d7c97 Binary files 
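The __pycache__/*.pyc files added in this diff are compiled build artifacts
rather than source; they are normally kept out of version control. If the
repository does not already ignore them, a two-line .gitignore entry covers it:

    __pycache__/
    *.pyc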
/dev/null and b/modules/database/__pycache__/morphosintax_mongo_db.cpython-311.pyc differ diff --git a/modules/database/__pycache__/morphosintaxis_export.cpython-311.pyc b/modules/database/__pycache__/morphosintaxis_export.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fac44e14d0770679a1a04702fe53c9e61ba9643 Binary files /dev/null and b/modules/database/__pycache__/morphosintaxis_export.cpython-311.pyc differ diff --git a/modules/database/__pycache__/sql_db.cpython-311.pyc b/modules/database/__pycache__/sql_db.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee69efedc1189015b7c5ef5b5f79bff84a17b0cb Binary files /dev/null and b/modules/database/__pycache__/sql_db.cpython-311.pyc differ diff --git a/modules/database/backUp/database.py b/modules/database/backUp/database.py new file mode 100644 index 0000000000000000000000000000000000000000..1676ee58c4527c524fa4f6eb4dbe94f28b4ca8b2 --- /dev/null +++ b/modules/database/backUp/database.py @@ -0,0 +1,216 @@ +# database.py +# Versión 3 actualizada para manejar chat_history_v3 + +import streamlit as st +import logging +import os +from pymongo import MongoClient +import certifi +from datetime import datetime, timezone +import uuid + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Variables globales para Cosmos DB MongoDB API +mongo_client = None +mongo_db = None +analysis_collection = None +chat_collection_v3 = None # Nueva variable global para chat_history_v3 + +def initialize_mongodb_connection(): + global mongo_client, mongo_db, analysis_collection, chat_collection_v3 + try: + cosmos_mongodb_connection_string = os.getenv("MONGODB_CONNECTION_STRING") + if not cosmos_mongodb_connection_string: + logger.error("La variable de entorno MONGODB_CONNECTION_STRING no está configurada") + return False + + mongo_client = MongoClient(cosmos_mongodb_connection_string, + tls=True, + tlsCAFile=certifi.where(), + retryWrites=False, + serverSelectionTimeoutMS=5000, + connectTimeoutMS=10000, + socketTimeoutMS=10000) + + mongo_client.admin.command('ping') + + mongo_db = mongo_client['aideatext_db'] + analysis_collection = mongo_db['text_analysis'] + chat_collection_v3 = mongo_db['chat_history_v3'] # Inicializar la nueva colección + + # Crear índices para chat_history_v3 + chat_collection_v3.create_index([("username", 1), ("timestamp", -1)]) + chat_collection_v3.create_index([("username", 1), ("analysis_type", 1), ("timestamp", -1)]) + + logger.info("Conexión a Cosmos DB MongoDB API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB MongoDB API: {str(e)}", exc_info=True) + return False + +def store_chat_history_v3(username, messages, analysis_type): + try: + logger.info(f"Guardando historial de chat para el usuario: {username}, tipo de análisis: {analysis_type}") + logger.debug(f"Mensajes a guardar: {messages}") + + chat_document = { + 'username': username, + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'analysis_type': analysis_type, + 'messages': messages + } + result = chat_collection_v3.insert_one(chat_document) + logger.info(f"Historial de chat guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el historial de chat para el usuario {username}: {str(e)}") + return False + +def get_chat_history_v3(username, analysis_type=None, limit=10): + try: + logger.info(f"Obteniendo historial de chat para el usuario: 
{username}, tipo de análisis: {analysis_type}") + + query = {"username": username} + if analysis_type: + query["analysis_type"] = analysis_type + + cursor = chat_collection_v3.find(query).sort("timestamp", -1).limit(limit) + + chat_history = [] + for chat in cursor: + chat_history.append({ + "timestamp": chat["timestamp"], + "analysis_type": chat["analysis_type"], + "messages": chat["messages"] + }) + + logger.info(f"Se obtuvieron {len(chat_history)} entradas de chat para el usuario: {username}") + return chat_history + except Exception as e: + logger.error(f"Error al obtener el historial de chat para el usuario {username}: {str(e)}") + return [] + +def delete_chat_history_v3(username, analysis_type=None): + try: + logger.info(f"Eliminando historial de chat para el usuario: {username}, tipo de análisis: {analysis_type}") + + query = {"username": username} + if analysis_type: + query["analysis_type"] = analysis_type + + result = chat_collection_v3.delete_many(query) + + logger.info(f"Se eliminaron {result.deleted_count} entradas de chat para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al eliminar el historial de chat para el usuario {username}: {str(e)}") + return False + +def export_chat_history_v3(username, analysis_type=None): + try: + logger.info(f"Exportando historial de chat para el usuario: {username}, tipo de análisis: {analysis_type}") + + query = {"username": username} + if analysis_type: + query["analysis_type"] = analysis_type + + cursor = chat_collection_v3.find(query).sort("timestamp", -1) + + export_data = list(cursor) + + logger.info(f"Se exportaron {len(export_data)} entradas de chat para el usuario: {username}") + return export_data + except Exception as e: + logger.error(f"Error al exportar el historial de chat para el usuario {username}: {str(e)}") + return [] + +# Funciones específicas para cada tipo de análisis + +def store_morphosyntax_result(username, text, repeated_words, arc_diagrams, pos_analysis, morphological_analysis, sentence_structure): + if analysis_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return False + + try: + word_count = {} + for word, color in repeated_words.items(): + category = color # Asumiendo que 'color' es la categoría gramatical + word_count[category] = word_count.get(category, 0) + 1 + + analysis_document = { + 'username': username, + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'text': text, + 'repeated_words': repeated_words, + 'word_count': word_count, + 'arc_diagrams': arc_diagrams, + 'pos_analysis': pos_analysis, + 'morphological_analysis': morphological_analysis, + 'sentence_structure': sentence_structure, + 'analysis_type': 'morphosyntax' + } + + result = analysis_collection.insert_one(analysis_document) + logger.info(f"Análisis morfosintáctico guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el análisis morfosintáctico para el usuario {username}: {str(e)}") + return False + +# Aquí puedes agregar funciones similares para análisis semántico y de discurso + +def get_student_data(username): + if analysis_collection is None or chat_collection_v3 is None: + logger.error("La conexión a MongoDB no está inicializada") + return None + + formatted_data = { + "username": username, + "entries": [], + "entries_count": 0, + "word_count": {}, + "chat_history": { + "morphosyntax": [], + "semantic": [], + "discourse": [] + } + } + + try: + logger.info(f"Buscando datos 
de análisis para el usuario: {username}") + cursor = analysis_collection.find({"username": username}) + + for entry in cursor: + formatted_entry = { + 'timestamp': entry.get("timestamp"), + "analysis_type": entry.get("analysis_type", "morphosyntax") + } + + if formatted_entry["analysis_type"] == "morphosyntax": + formatted_entry.update({ + "text": entry.get("text", ""), + "word_count": entry.get("word_count", {}), + "arc_diagrams": entry.get("arc_diagrams", []) + }) + for category, count in formatted_entry["word_count"].items(): + formatted_data["word_count"][category] = formatted_data["word_count"].get(category, 0) + count + + formatted_data["entries"].append(formatted_entry) + + formatted_data["entries_count"] = len(formatted_data["entries"]) + formatted_data["entries"].sort(key=lambda x: x["timestamp"], reverse=True) + + # Obtener historial de chat para cada tipo de análisis + for analysis_type in ["morphosyntax", "semantic", "discourse"]: + chat_history = get_chat_history_v3(username, analysis_type) + formatted_data["chat_history"][analysis_type] = chat_history + + except Exception as e: + logger.error(f"Error al obtener datos del estudiante {username}: {str(e)}") + + logger.info(f"Datos formateados para {username}: {formatted_data}") + return formatted_data + +# Puedes agregar más funciones según sea necesario para manejar otros tipos de datos o análisis \ No newline at end of file diff --git a/modules/database/backUp/databaseBackUp23-9-24.py b/modules/database/backUp/databaseBackUp23-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..bece010876634b24e9f01e26a9e2429f04ba88f7 --- /dev/null +++ b/modules/database/backUp/databaseBackUp23-9-24.py @@ -0,0 +1,581 @@ +# database.py +import logging +import os +from azure.cosmos import CosmosClient +from azure.cosmos.exceptions import CosmosHttpResponseError +from pymongo import MongoClient +import certifi +from datetime import datetime +import io +from io import BytesIO +import base64 +import matplotlib.pyplot as plt +from matplotlib.figure import Figure +import bcrypt +print(f"Bcrypt version: {bcrypt.__version__}") +import uuid +import plotly.graph_objects as go # Para manejar el diagrama de Sankey +import numpy as np # Puede ser necesario para algunas operaciones +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Variables globales para Cosmos DB SQL API +application_requests_container = None +cosmos_client = None +user_database = None +user_container = None +user_feedback_container = None + +# Variables globales para Cosmos DB MongoDB API +mongo_client = None +mongo_db = None +analysis_collection = None +chat_collection = None # Nueva variable global + + +##############################################################################--- INICIO DE LAS BASES DE DATOS --- ############################### +def initialize_database_connections(): + try: + print("Iniciando conexión a MongoDB") + mongodb_success = initialize_mongodb_connection() + print(f"Conexión a MongoDB: {'exitosa' if mongodb_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con MongoDB: {str(e)}") + mongodb_success = False + + try: + print("Iniciando conexión a Cosmos DB SQL API") + sql_success = initialize_cosmos_sql_connection() + print(f"Conexión a Cosmos DB SQL API: {'exitosa' if sql_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con Cosmos DB SQL API: {str(e)}") + sql_success = False + + return { + "mongodb": mongodb_success, + "cosmos_sql": sql_success 
+ } + +#####################################################################################33 +def initialize_cosmos_sql_connection(): + global cosmos_client, user_database, user_container, application_requests_container, user_feedback_container + logger.info("Initializing Cosmos DB SQL API connection") + try: + cosmos_endpoint = os.environ.get("COSMOS_ENDPOINT") + cosmos_key = os.environ.get("COSMOS_KEY") + logger.info(f"Cosmos Endpoint: {cosmos_endpoint}") + logger.info(f"Cosmos Key: {'*' * len(cosmos_key) if cosmos_key else 'Not set'}") + + if not cosmos_endpoint or not cosmos_key: + logger.error("COSMOS_ENDPOINT or COSMOS_KEY environment variables are not set") + raise ValueError("Las variables de entorno COSMOS_ENDPOINT y COSMOS_KEY deben estar configuradas") + + cosmos_client = CosmosClient(cosmos_endpoint, cosmos_key) + user_database = cosmos_client.get_database_client("user_database") + user_container = user_database.get_container_client("users") + application_requests_container = user_database.get_container_client("application_requests") + user_feedback_container = user_database.get_container_client("user_feedback") + + logger.info(f"user_container initialized: {user_container is not None}") + logger.info(f"application_requests_container initialized: {application_requests_container is not None}") + logger.info(f"user_feedback_container initialized: {user_feedback_container is not None}") + + logger.info("Conexión a Cosmos DB SQL API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB SQL API: {str(e)}", exc_info=True) + return False + +############################################################################################3 +def initialize_mongodb_connection(): + global mongo_client, mongo_db, analysis_collection, chat_collection + try: + cosmos_mongodb_connection_string = os.getenv("MONGODB_CONNECTION_STRING") + if not cosmos_mongodb_connection_string: + logger.error("La variable de entorno MONGODB_CONNECTION_STRING no está configurada") + return False + + mongo_client = MongoClient(cosmos_mongodb_connection_string, + tls=True, + tlsCAFile=certifi.where(), + retryWrites=False, + serverSelectionTimeoutMS=5000, + connectTimeoutMS=10000, + socketTimeoutMS=10000) + + mongo_client.admin.command('ping') + + mongo_db = mongo_client['aideatext_db'] + analysis_collection = mongo_db['text_analysis'] + chat_collection = mongo_db['chat_history'] # Inicializar la nueva colección + + # Verificar la conexión + mongo_client.admin.command('ping') + + logger.info("Conexión a Cosmos DB MongoDB API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB MongoDB API: {str(e)}", exc_info=True) + return False + +##############################################################################--- FIN DEL INICIO DE LAS BASES DE DATOS --- ################################################################################################################################ +########################################################## -- INICIO DE GESTION DE USUARIOS ---########################################################## +def create_user(username, password, role): + global user_container + try: + print(f"Attempting to create user: {username} with role: {role}") + if user_container is None: + print("Error: user_container is None. 
Attempting to reinitialize connection.") + if not initialize_cosmos_sql_connection(): + raise Exception("Failed to initialize SQL connection") + + hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8') + print(f"Password hashed successfully for user: {username}") + user_data = { + 'id': username, + 'password': hashed_password, + 'role': role, + 'created_at': datetime.utcnow().isoformat() + } + user_container.create_item(body=user_data) + print(f"Usuario {role} creado: {username}") # Log para depuración + return True + except Exception as e: + print(f"Detailed error in create_user: {str(e)}") + return False + +####################################################################################################### +def create_admin_user(username, password): + return create_user(username, password, 'Administrador') + +####################################################################################################### +def create_student_user(username, password): + return create_user(username, password, 'Estudiante') + +####################################################################################################### +# Funciones para Cosmos DB SQL API (manejo de usuarios) +def get_user(username): + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + user = items[0] if items else None + if user: + print(f"Usuario encontrado: {username}, Rol: {user.get('role')}") # Log añadido + else: + print(f"Usuario no encontrado: {username}") # Log añadido + return user + except Exception as e: + print(f"Error al obtener usuario {username}: {str(e)}") + return None + +########################################################## -- FIN DE GESTION DE USUARIOS ---########################################################## + +########################################################## -- INICIO GESTION DE ARCHIVOS ---########################################################## + +def store_file_contents(username, file_name, file_contents, analysis_type): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + document = { + 'id': f"{username}_{analysis_type}_{file_name}", + 'username': username, + 'file_name': file_name, + 'analysis_type': analysis_type, + 'file_contents': file_contents, + 'timestamp': datetime.utcnow().isoformat() + } + user_container.upsert_item(body=document) + logger.info(f"Contenido del archivo guardado para el usuario: {username}, tipo de análisis: {analysis_type}") + return True + except Exception as e: + logger.error(f"Error al guardar el contenido del archivo para el usuario {username}: {str(e)}") + return False + +def retrieve_file_contents(username, file_name, analysis_type): + print(f"Attempting to retrieve file: {file_name} for user: {username}") + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return None + try: + query = f"SELECT * FROM c WHERE c.id = '{username}_{analysis_type}_{file_name}'" + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + if items: + return items[0]['file_contents'] + else: + logger.info(f"No se encontró contenido de archivo para el usuario: {username}, tipo de análisis: {analysis_type}") + return None + except Exception as e: + logger.error(f"Error al recuperar el contenido del archivo para el usuario {username}: {str(e)}") + return None + +def 
get_user_files(username, analysis_type=None): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return [] + try: + if analysis_type: + query = f"SELECT c.file_name, c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}' AND c.analysis_type = '{analysis_type}'" + else: + query = f"SELECT c.file_name, c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}'" + + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + return items + except Exception as e: + logger.error(f"Error al obtener la lista de archivos del usuario {username}: {str(e)}") + return [] + +def delete_file(username, file_name, analysis_type): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + user_container.delete_item(item=f"{username}_{analysis_type}_{file_name}", partition_key=username) + logger.info(f"Archivo eliminado para el usuario: {username}, tipo de análisis: {analysis_type}") + return True + except Exception as e: + logger.error(f"Error al eliminar el archivo para el usuario {username}: {str(e)}") + return False + +########################################################## -- FIN GESTION DE ARCHIVOS ---########################################################## + +########################################################## -- INICIO GESTION DE FORMULARIOS ---########################################################## +def store_application_request(name, email, institution, role, reason): + global application_requests_container + logger.info("Entering store_application_request function") + try: + logger.info("Checking application_requests_container") + if application_requests_container is None: + logger.error("application_requests_container is not initialized") + return False + + logger.info("Creating application request document") + application_request = { + "id": str(uuid.uuid4()), + "name": name, + "email": email, + "institution": institution, + "role": role, + "reason": reason, + "requestDate": datetime.utcnow().isoformat() + } + + logger.info(f"Attempting to store document: {application_request}") + application_requests_container.create_item(body=application_request) + logger.info(f"Application request stored for email: {email}") + return True + except Exception as e: + logger.error(f"Error storing application request: {str(e)}") + return False + +####################################################################################################### +def store_user_feedback(username, name, email, feedback): + global user_feedback_container + logger.info(f"Attempting to store user feedback for user: {username}") + try: + if user_feedback_container is None: + logger.error("user_feedback_container is not initialized") + return False + + feedback_item = { + "id": str(uuid.uuid4()), + "username": username, + "name": name, + "email": email, + "feedback": feedback, + "timestamp": datetime.utcnow().isoformat() + } + + result = user_feedback_container.create_item(body=feedback_item) + logger.info(f"User feedback stored with ID: {result['id']} for user: {username}") + return True + except Exception as e: + logger.error(f"Error storing user feedback for user {username}: {str(e)}") + return False + + +########################################################## -- FIN GESTION DE FORMULARIOS ---########################################################## + +########################################################## -- INICIO ALMACENAMIENTO ANÁLISIS 
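# Sketch: when both the document id and the partition key are known (documents in
# this container appear to be keyed by username, as delete_file() above implies),
# a Cosmos point read is cheaper and simpler than a cross-partition SQL query.
def retrieve_file_contents_by_id(username, file_name, analysis_type):
    try:
        item = user_container.read_item(
            item=f"{username}_{analysis_type}_{file_name}",
            partition_key=username,
        )
        return item['file_contents']
    except CosmosHttpResponseError:
        # Not found (or other HTTP error): mirror retrieve_file_contents() and return None.
        return None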
MORFOSINTÁCTICO ---########################################################## + +def store_morphosyntax_result(username, text, repeated_words, arc_diagrams, pos_analysis, morphological_analysis, sentence_structure): + if analysis_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return False + + try: + word_count = {} + for word, color in repeated_words.items(): + category = color # Asumiendo que 'color' es la categoría gramatical + word_count[category] = word_count.get(category, 0) + 1 + + analysis_document = { + 'username': username, + 'timestamp': datetime.utcnow(), + 'text': text, + 'word_count': word_count, + 'arc_diagrams': arc_diagrams, + 'pos_analysis': pos_analysis, + 'morphological_analysis': morphological_analysis, + 'sentence_structure': sentence_structure + } + + result = analysis_collection.insert_one(analysis_document) + logger.info(f"Análisis guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el análisis para el usuario {username}: {str(e)}") + return False + +########################################################## -- FIN ALMACENAMIENTO ANÁLISIS MORFOSINTÁCTICO ---########################################################## + + +##########################################--- INICIO SECCIÓN DEL ANÁLISIS SEMÁNTICO ---############################################### + +def store_file_semantic_contents(username, file_name, file_contents): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + document = { + 'id': f"{username}_semantic_{file_name}", + 'username': username, + 'file_name': file_name, + 'file_contents': file_contents, + 'analysis_type': 'semantic', + 'timestamp': datetime.utcnow().isoformat() + } + user_container.upsert_item(body=document) + logger.info(f"Contenido del archivo semántico guardado para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el contenido del archivo semántico para el usuario {username}: {str(e)}") + return False + +def store_semantic_result(username, text, analysis_result): + if analysis_collection is None: + print("La conexión a MongoDB no está inicializada") + return False + try: + # Convertir los conceptos clave a una lista de tuplas + key_concepts = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts']] + + # Convertir los gráficos a imágenes base64 + graphs = {} + for graph_name in ['relations_graph', 'entity_graph', 'topic_graph']: + if graph_name in analysis_result: + buf = BytesIO() + analysis_result[graph_name].savefig(buf, format='png') + buf.seek(0) + graphs[graph_name] = base64.b64encode(buf.getvalue()).decode('utf-8') + + analysis_document = { + 'username': username, + 'timestamp': datetime.utcnow(), + 'text': text, + 'key_concepts': key_concepts, + 'graphs': graphs, + 'summary': analysis_result.get('summary', ''), + 'entities': analysis_result.get('entities', {}), + 'sentiment': analysis_result.get('sentiment', ''), + 'topics': analysis_result.get('topics', []), + 'analysis_type': 'semantic' + } + + result = analysis_collection.insert_one(analysis_document) + print(f"Análisis semántico guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + print(f"Error al guardar el análisis semántico para el usuario {username}: {str(e)}") + return False + +##########################################--- FIN DE LA SECCIÓN 
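# Sketch: helpers capturing the figure-to-base64 convention used by
# store_semantic_result() above, assuming matplotlib figures as input. Encoding
# mirrors the storage path; decoding returns raw PNG bytes for the UI layer
# (e.g. Streamlit's st.image) or for writing to disk.
def figure_to_b64(fig):
    buf = BytesIO()
    fig.savefig(buf, format='png')
    plt.close(fig)  # release the figure once it is serialized
    buf.seek(0)
    return base64.b64encode(buf.getvalue()).decode('utf-8')

def b64_to_png_bytes(graph_b64):
    return base64.b64decode(graph_b64)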
DEL ANÁLISIS SEMÁNTICO ---############################################### + +############################################--- INICIO DE LA SECCIÓN DEL ANÁLISIS DEL DISCURSO ################################################################### + +def store_discourse_analysis_result(username, text1, text2, analysis_result): + if analysis_collection is None: + print("La conexión a MongoDB no está inicializada") + return False + + try: + # Convertir los grafos individuales a imágenes base64 + buf1 = BytesIO() + analysis_result['graph1'].savefig(buf1, format='png') + buf1.seek(0) + img_str1 = base64.b64encode(buf1.getvalue()).decode('utf-8') + + buf2 = BytesIO() + analysis_result['graph2'].savefig(buf2, format='png') + buf2.seek(0) + img_str2 = base64.b64encode(buf2.getvalue()).decode('utf-8') + + # Crear una imagen combinada + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) + ax1.imshow(plt.imread(BytesIO(base64.b64decode(img_str1)))) + ax1.axis('off') + ax1.set_title("Documento 1: Relaciones Conceptuales") + ax2.imshow(plt.imread(BytesIO(base64.b64decode(img_str2)))) + ax2.axis('off') + ax2.set_title("Documento 2: Relaciones Conceptuales") + + buf_combined = BytesIO() + fig.savefig(buf_combined, format='png') + buf_combined.seek(0) + img_str_combined = base64.b64encode(buf_combined.getvalue()).decode('utf-8') + plt.close(fig) + + # Convertir los conceptos clave a listas de tuplas + key_concepts1 = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts1']] + key_concepts2 = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts2']] + + # Crear el documento para guardar + analysis_document = { + 'username': username, + 'timestamp': datetime.utcnow(), + #'text1': text1, + #'text2': text2, + 'graph1': img_str1, + 'graph2': img_str2, + 'combined_graph': img_str_combined, + 'key_concepts1': key_concepts1, + 'key_concepts2': key_concepts2, + 'analysis_type': 'discourse' + } + + # Insertar el documento en la base de datos + result = analysis_collection.insert_one(analysis_document) + print(f"Análisis discursivo guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + print(f"Error al guardar el análisis discursivo para el usuario {username}: {str(e)}") + print(f"Tipo de excepción: {type(e).__name__}") + print(f"Detalles de la excepción: {e.args}") + return False + +############################################--- FIN DE LA SECCIÓN DEL ANÁLISIS DEL DISCURSO ################################################################### + + +################################################-- INICIO DE LA SECCIÓN DEL CHATBOT --- ############################################################### +def store_chat_history(username, messages): + try: + logger.info(f"Attempting to save chat history for user: {username}") + logger.debug(f"Messages to save: {messages}") + + chat_document = { + 'username': username, + 'timestamp': datetime.utcnow(), + 'messages': messages + } + result = chat_collection.insert_one(chat_document) + logger.info(f"Chat history saved with ID: {result.inserted_id} for user: {username}") + logger.debug(f"Chat content: {messages}") + return True + except Exception as e: + logger.error(f"Error saving chat history for user {username}: {str(e)}") + return False + +####################################################################################################### +def export_analysis_and_chat(username, analysis_data, chat_data): + try: + export_data = { + "username": username, + 
"timestamp": datetime.utcnow(), + "analysis": analysis_data, + "chat": chat_data + } + + # Aquí puedes decidir cómo quieres exportar los datos + # Por ejemplo, podrías guardarlos en una nueva colección en MongoDB + export_collection = mongo_db['exports'] + result = export_collection.insert_one(export_data) + + # También podrías generar un archivo JSON o CSV y guardarlo en Azure Blob Storage + + return True + except Exception as e: + logger.error(f"Error al exportar análisis y chat para {username}: {str(e)}") + return False + +################################################-- FIN DE LA SECCIÓN DEL CHATBOT --- ############################################################### + +####################################################################################################################################################### + +def get_student_data(username): + if analysis_collection is None or chat_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return None + formatted_data = { + "username": username, + "entries": [], + "entries_count": 0, + "word_count": {}, + "semantic_analyses": [], + "discourse_analyses": [], + "chat_history": [] + } + try: + logger.info(f"Buscando datos de análisis para el usuario: {username}") + cursor = analysis_collection.find({"username": username}) + + for entry in cursor: + formatted_entry = { + "timestamp": entry.get("timestamp", datetime.utcnow()), + "analysis_type": entry.get("analysis_type", "morphosyntax") + } + + if formatted_entry["analysis_type"] == "morphosyntax": + formatted_entry.update({ + "text": entry.get("text", ""), + "word_count": entry.get("word_count", {}), + "arc_diagrams": entry.get("arc_diagrams", []) + }) + for category, count in formatted_entry["word_count"].items(): + formatted_data["word_count"][category] = formatted_data["word_count"].get(category, 0) + count + + elif formatted_entry["analysis_type"] == "semantic": + formatted_entry.update({ + "key_concepts": entry.get("key_concepts", []), + "graph": entry.get("graph", "") + }) + formatted_data["semantic_analyses"].append(formatted_entry) + + elif formatted_entry["analysis_type"] == "discourse": + formatted_entry.update({ + "text1": entry.get("text1", ""), + "text2": entry.get("text2", ""), + "key_concepts1": entry.get("key_concepts1", []), + "key_concepts2": entry.get("key_concepts2", []), + "graph1": entry.get("graph1", ""), + "graph2": entry.get("graph2", ""), + "combined_graph": entry.get("combined_graph", "") + }) + formatted_data["discourse_analyses"].append(formatted_entry) + + formatted_data["entries"].append(formatted_entry) + + formatted_data["entries_count"] = len(formatted_data["entries"]) + formatted_data["entries"].sort(key=lambda x: x["timestamp"], reverse=True) + + for entry in formatted_data["entries"]: + entry["timestamp"] = entry["timestamp"].isoformat() + + except Exception as e: + logger.error(f"Error al obtener datos de análisis del estudiante {username}: {str(e)}") + + try: + logger.info(f"Buscando historial de chat para el usuario: {username}") + chat_cursor = chat_collection.find({"username": username}) + for chat in chat_cursor: + formatted_chat = { + "timestamp": chat["timestamp"].isoformat(), + "messages": chat["messages"] + } + formatted_data["chat_history"].append(formatted_chat) + + formatted_data["chat_history"].sort(key=lambda x: x["timestamp"], reverse=True) + + except Exception as e: + logger.error(f"Error al obtener historial de chat del estudiante {username}: {str(e)}") + logger.info(f"Datos formateados para 
{username}: {formatted_data}") + return formatted_data diff --git a/modules/database/backUp/database_oldFromV2.py b/modules/database/backUp/database_oldFromV2.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3f816151bafad692d16a1dbbc4b78bdd7f7ed9 --- /dev/null +++ b/modules/database/backUp/database_oldFromV2.py @@ -0,0 +1,473 @@ +# database.py +# database.py de la versión 3 al 26-9-2024 +import streamlit as st +import logging +import os +import pandas as pd +from azure.cosmos import CosmosClient +from azure.cosmos.exceptions import CosmosHttpResponseError +from pymongo import MongoClient +import certifi +from datetime import datetime, timezone +from io import BytesIO +import base64 +import matplotlib.pyplot as plt +from matplotlib.figure import Figure +import bcrypt +print(f"Bcrypt version: {bcrypt.__version__}") +import uuid +import plotly.graph_objects as go # Para manejar el diagrama de Sankey +import numpy as np # Puede ser necesario para algunas operaciones +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Variables globales para Cosmos DB SQL API +application_requests_container = None +cosmos_client = None +user_database = None +user_container = None +user_feedback_container = None + +# Variables globales para Cosmos DB MongoDB API +mongo_client = None +mongo_db = None +analysis_collection = None +chat_collection = None # Nueva variable global + + +##############################################################################--- INICIO DE LAS BASES DE DATOS --- ############################### +def initialize_database_connections(): + try: + print("Iniciando conexión a MongoDB") + mongodb_success = initialize_mongodb_connection() + print(f"Conexión a MongoDB: {'exitosa' if mongodb_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con MongoDB: {str(e)}") + mongodb_success = False + + try: + print("Iniciando conexión a Cosmos DB SQL API") + sql_success = initialize_cosmos_sql_connection() + print(f"Conexión a Cosmos DB SQL API: {'exitosa' if sql_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con Cosmos DB SQL API: {str(e)}") + sql_success = False + + return { + "mongodb": mongodb_success, + "cosmos_sql": sql_success + } + +#####################################################################################33 +def initialize_cosmos_sql_connection(): + global cosmos_client, user_database, user_container, application_requests_container, user_feedback_container + logger.info("Initializing Cosmos DB SQL API connection") + try: + cosmos_endpoint = os.environ.get("COSMOS_ENDPOINT") + cosmos_key = os.environ.get("COSMOS_KEY") + logger.info(f"Cosmos Endpoint: {cosmos_endpoint}") + logger.info(f"Cosmos Key: {'*' * len(cosmos_key) if cosmos_key else 'Not set'}") + + if not cosmos_endpoint or not cosmos_key: + logger.error("COSMOS_ENDPOINT or COSMOS_KEY environment variables are not set") + raise ValueError("Las variables de entorno COSMOS_ENDPOINT y COSMOS_KEY deben estar configuradas") + + cosmos_client = CosmosClient(cosmos_endpoint, cosmos_key) + user_database = cosmos_client.get_database_client("user_database") + user_container = user_database.get_container_client("users") + application_requests_container = user_database.get_container_client("application_requests") + user_feedback_container = user_database.get_container_client("user_feedback") + + logger.info(f"user_container initialized: {user_container is not None}") + logger.info(f"application_requests_container 
initialized: {application_requests_container is not None}") + logger.info(f"user_feedback_container initialized: {user_feedback_container is not None}") + + logger.info("Conexión a Cosmos DB SQL API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB SQL API: {str(e)}", exc_info=True) + return False + +############################################################################################3 +def initialize_mongodb_connection(): + global mongo_client, mongo_db, analysis_collection, chat_collection + try: + cosmos_mongodb_connection_string = os.getenv("MONGODB_CONNECTION_STRING") + if not cosmos_mongodb_connection_string: + logger.error("La variable de entorno MONGODB_CONNECTION_STRING no está configurada") + return False + + mongo_client = MongoClient(cosmos_mongodb_connection_string, + tls=True, + tlsCAFile=certifi.where(), + retryWrites=False, + serverSelectionTimeoutMS=5000, + connectTimeoutMS=10000, + socketTimeoutMS=10000) + + mongo_client.admin.command('ping') + + mongo_db = mongo_client['aideatext_db'] + # export = mongo_db['export'] + analysis_collection = mongo_db['text_analysis'] + chat_collection = mongo_db['chat_history'] # Inicializar la nueva colección + + # Verificar la conexión + mongo_client.admin.command('ping') + + logger.info("Conexión a Cosmos DB MongoDB API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB MongoDB API: {str(e)}", exc_info=True) + return False + +##############################################################################--- FIN DEL INICIO DE LAS BASES DE DATOS --- ################################################################################################################################ +########################################################## -- INICIO DE GESTION DE USUARIOS ---########################################################## +def create_user(username, password, role): + global user_container + try: + print(f"Attempting to create user: {username} with role: {role}") + if user_container is None: + print("Error: user_container is None. 
Attempting to reinitialize connection.") + if not initialize_cosmos_sql_connection(): + raise Exception("Failed to initialize SQL connection") + + hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8') + print(f"Password hashed successfully for user: {username}") + user_data = { + 'id': username, + 'password': hashed_password, + 'role': role, + 'timestamp':datetime.now(timezone.utc).isoformat(), + } + user_container.create_item(body=user_data) + print(f"Usuario {role} creado: {username}") # Log para depuración + return True + except Exception as e: + print(f"Detailed error in create_user: {str(e)}") + return False + +####################################################################################################### +def create_admin_user(username, password): + return create_user(username, password, 'Administrador') + +####################################################################################################### +def create_student_user(username, password): + return create_user(username, password, 'Estudiante') + +####################################################################################################### +# Funciones para Cosmos DB SQL API (manejo de usuarios) +def get_user(username): + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + user = items[0] if items else None + if user: + print(f"Usuario encontrado: {username}, Rol: {user.get('role')}") # Log añadido + else: + print(f"Usuario no encontrado: {username}") # Log añadido + return user + except Exception as e: + print(f"Error al obtener usuario {username}: {str(e)}") + return None + +########################################################## -- FIN DE GESTION DE USUARIOS ---########################################################## + +########################################################## -- INICIO GESTION DE ARCHIVOS ---########################################################## + +def manage_file_contents(username, file_name, analysis_type, file_contents=None): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return None + + item_id = f"{analysis_type}_{file_name}" + + try: + if file_contents is not None: + # Storing or updating file + document = { + 'id': item_id, + 'username': username, + 'file_name': file_name, + 'analysis_type': analysis_type, + 'file_contents': file_contents, + 'timestamp': datetime.now(timezone.utc).isoformat() + } + user_container.upsert_item(body=document, partition_key=username) + logger.info(f"Contenido del archivo guardado/actualizado para el usuario: {username}, tipo de análisis: {analysis_type}") + return True + else: + # Retrieving file + item = user_container.read_item(item=item_id, partition_key=username) + return item['file_contents'] + except CosmosHttpResponseError as e: + if e.status_code == 404: + logger.info(f"No se encontró el archivo para el usuario: {username}, tipo de análisis: {analysis_type}") + return None + else: + logger.error(f"Error de Cosmos DB al manejar el archivo para el usuario {username}: {str(e)}") + return None + except Exception as e: + logger.error(f"Error al manejar el archivo para el usuario {username}: {str(e)}") + return None + + +def get_user_files(username, analysis_type=None): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return [] + try: + if analysis_type: + query = f"SELECT c.file_name, 
c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}' AND c.analysis_type = '{analysis_type}'"
+        else:
+            query = f"SELECT c.file_name, c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}'"
+
+        items = list(user_container.query_items(query=query, enable_cross_partition_query=True))
+        return items
+    except Exception as e:
+        logger.error(f"Error al obtener la lista de archivos del usuario {username}: {str(e)}")
+        return []
+
+
+def delete_file(username, file_name, analysis_type):
+    if user_container is None:
+        logger.error("La conexión a Cosmos DB SQL API no está inicializada")
+        return False
+
+    try:
+        item_id = f"{analysis_type}_{file_name}"
+        user_container.delete_item(item=item_id, partition_key=username)
+        logger.info(f"Archivo eliminado para el usuario: {username}, tipo de análisis: {analysis_type}, nombre: {file_name}")
+
+        # Invalidar caché
+        cache_key = f"student_data_{username}"
+        if cache_key in st.session_state:
+            del st.session_state[cache_key]
+
+        return True
+
+    except CosmosHttpResponseError as e:
+        logger.error(f"Cosmos DB error al eliminar el archivo para el usuario {username}: {str(e)}")
+        return False
+
+    except Exception as e:
+        logger.error(f"Error al eliminar el archivo para el usuario {username}: {str(e)}")
+        return False
+
+########################################################## -- FIN GESTION DE ARCHIVOS ---##########################################################
+
+########################################################## -- INICIO GESTION DE FORMULARIOS ---##########################################################
+def store_application_request(name, email, institution, role, reason):
+    global application_requests_container
+    logger.info("Entering store_application_request function")
+    try:
+        logger.info("Checking application_requests_container")
+        if application_requests_container is None:
+            logger.error("application_requests_container is not initialized")
+            return False
+
+        logger.info("Creating application request document")
+        application_request = {
+            "id": str(uuid.uuid4()),
+            "name": name,
+            "email": email,
+            "institution": institution,
+            "role": role,
+            "reason": reason,
+            "requestDate": datetime.utcnow().isoformat()
+        }
+
+        logger.info(f"Attempting to store document: {application_request}")
+        application_requests_container.create_item(body=application_request)
+        logger.info(f"Application request stored for email: {email}")
+        return True
+    except Exception as e:
+        logger.error(f"Error storing application request: {str(e)}")
+        return False
+
+#######################################################################################################
+def store_user_feedback(username, name, email, feedback):
+    global user_feedback_container
+    logger.info(f"Attempting to store user feedback for user: {username}")
+    try:
+        if user_feedback_container is None:
+            logger.error("user_feedback_container is not initialized")
+            return False
+
+        feedback_item = {
+            "id": str(uuid.uuid4()),
+            "username": username,
+            "name": name,
+            "email": email,
+            "feedback": feedback,
+            'timestamp': datetime.now(timezone.utc).isoformat(),
+        }
+
+        result = user_feedback_container.create_item(body=feedback_item)
+        logger.info(f"User feedback stored with ID: {result['id']} for user: {username}")
+        return True
+    except Exception as e:
+        logger.error(f"Error storing user feedback for user {username}: {str(e)}")
+        return False
+
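+# A minimal usage sketch (hypothetical, not part of this module) of a Streamlit
+# form feeding store_user_feedback; the widget labels and form key are illustrative
+# only, and st.session_state.username is assumed to be set by the auth flow:
+#
+#     import streamlit as st
+#
+#     with st.form("feedback_form"):
+#         name = st.text_input("Nombre")
+#         email = st.text_input("Correo electrónico")
+#         feedback = st.text_area("Comentarios")
+#         if st.form_submit_button("Enviar"):
+#             if store_user_feedback(st.session_state.username, name, email, feedback):
+#                 st.success("Gracias por tus comentarios")
+#             else:
+#                 st.error("No se pudo guardar el comentario")
+
+########################################################## -- FIN GESTION DE FORMULARIOS 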
---########################################################## + +########################################################## -- INICIO ALMACENAMIENTO ANÁLISIS MORFOSINTÁCTICO ---########################################################## + +def store_morphosyntax_result(username, text, repeated_words, arc_diagrams, pos_analysis, morphological_analysis, sentence_structure): + if analysis_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return False + + try: + word_count = {} + for word, color in repeated_words.items(): + category = color # Asumiendo que 'color' es la categoría gramatical + word_count[category] = word_count.get(category, 0) + 1 + + analysis_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + 'text': text, + 'repeated_words': repeated_words, + 'word_count': word_count, + 'arc_diagrams': arc_diagrams, + 'pos_analysis': pos_analysis, + 'morphological_analysis': morphological_analysis, + 'sentence_structure': sentence_structure, + 'analysis_type': 'morphosyntax' + } + + result = analysis_collection.insert_one(analysis_document) + logger.info(f"Análisis guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el análisis para el usuario {username}: {str(e)}") + return False + + +################################################-- INICIO DE LA SECCIÓN DEL CHATBOT --- ############################################################### +def store_chat_history(username, messages): + try: + logger.info(f"Attempting to save chat history for user: {username}") + logger.debug(f"Messages to save: {messages}") + + chat_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + 'messages': messages + } + result = chat_collection.insert_one(chat_document) + logger.info(f"Chat history saved with ID: {result.inserted_id} for user: {username}") + logger.debug(f"Chat content: {messages}") + return True + except Exception as e: + logger.error(f"Error saving chat history for user {username}: {str(e)}") + return False + +####################################################################################################### +def export_analysis_and_chat(username, analysis_data, chat_data): + try: + export_data = { + "username": username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + "analysis": analysis_data, + "chat": chat_data + } + + # Aquí puedes decidir cómo quieres exportar los datos + # Por ejemplo, podrías guardarlos en una nueva colección en MongoDB + export_collection = mongo_db['exports'] + result = export_collection.insert_one(export_data) + + # También podrías generar un archivo JSON o CSV y guardarlo en Azure Blob Storage + + return True + except Exception as e: + logger.error(f"Error al exportar análisis y chat para {username}: {str(e)}") + return False + +################################################-- FIN DE LA SECCIÓN DEL CHATBOT --- ############################################################### +########--- STUDENT DATA ------- + +def get_student_data(username): + if analysis_collection is None or chat_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return None + formatted_data = { + "username": username, + "entries": [], + "entries_count": 0, + "word_count": {}, + "semantic_analyses": [], + "discourse_analyses": [], + "chat_history": [] + } + try: + logger.info(f"Buscando datos de análisis para el usuario: {username}") + cursor = 
analysis_collection.find({"username": username})
+
+        for entry in cursor:
+            formatted_entry = {
+                "timestamp": entry.get("timestamp", datetime.now(timezone.utc).isoformat()),
+                "analysis_type": entry.get("analysis_type", "morphosyntax")
+            }
+
+            if formatted_entry["analysis_type"] == "morphosyntax":
+                formatted_entry.update({
+                    "text": entry.get("text", ""),
+                    "word_count": entry.get("word_count", {}),
+                    "arc_diagrams": entry.get("arc_diagrams", [])
+                })
+                for category, count in formatted_entry["word_count"].items():
+                    formatted_data["word_count"][category] = formatted_data["word_count"].get(category, 0) + count
+
+            elif formatted_entry["analysis_type"] == "semantic":
+                formatted_entry.update({
+                    "key_concepts": entry.get("key_concepts", []),
+                    "graph": entry.get("graph", "")
+                })
+                formatted_data["semantic_analyses"].append(formatted_entry)
+
+            elif formatted_entry["analysis_type"] == "discourse":
+                formatted_entry.update({
+                    "text1": entry.get("text1", ""),
+                    "text2": entry.get("text2", ""),
+                    "key_concepts1": entry.get("key_concepts1", []),
+                    "key_concepts2": entry.get("key_concepts2", []),
+                    "graph1": entry.get("graph1", ""),
+                    "graph2": entry.get("graph2", ""),
+                    "combined_graph": entry.get("combined_graph", "")
+                })
+                formatted_data["discourse_analyses"].append(formatted_entry)
+
+            formatted_data["entries"].append(formatted_entry)
+
+        formatted_data["entries_count"] = len(formatted_data["entries"])
+
+        # Normalize any datetime timestamps to ISO strings so the sort never mixes types
+        for entry in formatted_data["entries"]:
+            if hasattr(entry["timestamp"], "isoformat"):
+                entry["timestamp"] = entry["timestamp"].isoformat()
+
+        formatted_data["entries"].sort(key=lambda x: x["timestamp"], reverse=True)
+
+    except Exception as e:
+        logger.error(f"Error al obtener datos de análisis del estudiante {username}: {str(e)}")
+
+    try:
+        logger.info(f"Buscando historial de chat para el usuario: {username}")
+        chat_cursor = chat_collection.find({"username": username})
+        for chat in chat_cursor:
+            ts = chat["timestamp"]
+            formatted_chat = {
+                "timestamp": ts.isoformat() if hasattr(ts, "isoformat") else ts,
+                "messages": chat["messages"]
+            }
+            formatted_data["chat_history"].append(formatted_chat)
+
+        formatted_data["chat_history"].sort(key=lambda x: x["timestamp"], reverse=True)
+
+    except Exception as e:
+        logger.error(f"Error al obtener historial de chat del estudiante {username}: {str(e)}")
+    logger.info(f"Datos formateados para {username}: {formatted_data}")
+    return formatted_data
diff --git a/modules/database/backUp/database_v3-2ok.py b/modules/database/backUp/database_v3-2ok.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee15eb120bfe8092774a2f6a434538f5d3b3433d
--- /dev/null
+++ b/modules/database/backUp/database_v3-2ok.py
@@ -0,0 +1,629 @@
+# database.py
+import logging
+import os
+from azure.cosmos import CosmosClient
+from azure.cosmos.exceptions import CosmosHttpResponseError
+from pymongo import MongoClient
+import certifi
+from datetime import datetime, timezone
+import io
+from io import BytesIO
+import base64
+import matplotlib.pyplot as plt
+from matplotlib.figure import Figure
+import bcrypt
+print(f"Bcrypt version: {bcrypt.__version__}")
+import uuid
+import plotly.graph_objects as go  # Para manejar el diagrama de Sankey
+import numpy as np  # Puede ser necesario para algunas operaciones
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+# Variables globales para Cosmos DB SQL API
+application_requests_container = None
+cosmos_client = None
+user_database = None
+user_container = None
+user_feedback_container = None
+
+# Variables globales para Cosmos DB MongoDB API
+mongo_client = None
+mongo_db = None
+analysis_collection =
None +chat_collection = None # Nueva variable global + + +##############################################################################--- INICIO DE LAS BASES DE DATOS --- ############################### +def initialize_database_connections(): + try: + print("Iniciando conexión a MongoDB") + mongodb_success = initialize_mongodb_connection() + print(f"Conexión a MongoDB: {'exitosa' if mongodb_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con MongoDB: {str(e)}") + mongodb_success = False + + try: + print("Iniciando conexión a Cosmos DB SQL API") + sql_success = initialize_cosmos_sql_connection() + print(f"Conexión a Cosmos DB SQL API: {'exitosa' if sql_success else 'fallida'}") + except Exception as e: + print(f"Error al conectar con Cosmos DB SQL API: {str(e)}") + sql_success = False + + return { + "mongodb": mongodb_success, + "cosmos_sql": sql_success + } + +#####################################################################################33 +def initialize_cosmos_sql_connection(): + global cosmos_client, user_database, user_container, application_requests_container, user_feedback_container + logger.info("Initializing Cosmos DB SQL API connection") + try: + cosmos_endpoint = os.environ.get("COSMOS_ENDPOINT") + cosmos_key = os.environ.get("COSMOS_KEY") + logger.info(f"Cosmos Endpoint: {cosmos_endpoint}") + logger.info(f"Cosmos Key: {'*' * len(cosmos_key) if cosmos_key else 'Not set'}") + + if not cosmos_endpoint or not cosmos_key: + logger.error("COSMOS_ENDPOINT or COSMOS_KEY environment variables are not set") + raise ValueError("Las variables de entorno COSMOS_ENDPOINT y COSMOS_KEY deben estar configuradas") + + cosmos_client = CosmosClient(cosmos_endpoint, cosmos_key) + user_database = cosmos_client.get_database_client("user_database") + user_container = user_database.get_container_client("users") + application_requests_container = user_database.get_container_client("application_requests") + user_feedback_container = user_database.get_container_client("user_feedback") + + logger.info(f"user_container initialized: {user_container is not None}") + logger.info(f"application_requests_container initialized: {application_requests_container is not None}") + logger.info(f"user_feedback_container initialized: {user_feedback_container is not None}") + + logger.info("Conexión a Cosmos DB SQL API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB SQL API: {str(e)}", exc_info=True) + return False + +############################################################################################3 +def initialize_mongodb_connection(): + global mongo_client, mongo_db, analysis_collection, chat_collection + try: + cosmos_mongodb_connection_string = os.getenv("MONGODB_CONNECTION_STRING") + if not cosmos_mongodb_connection_string: + logger.error("La variable de entorno MONGODB_CONNECTION_STRING no está configurada") + return False + + mongo_client = MongoClient(cosmos_mongodb_connection_string, + tls=True, + tlsCAFile=certifi.where(), + retryWrites=False, + serverSelectionTimeoutMS=5000, + connectTimeoutMS=10000, + socketTimeoutMS=10000) + + mongo_client.admin.command('ping') + + mongo_db = mongo_client['aideatext_db'] + analysis_collection = mongo_db['text_analysis'] + chat_collection = mongo_db['chat_history'] # Inicializar la nueva colección + + # Verificar la conexión + mongo_client.admin.command('ping') + + logger.info("Conexión a Cosmos DB MongoDB API exitosa") + return True + except Exception as e: + 
logger.error(f"Error al conectar con Cosmos DB MongoDB API: {str(e)}", exc_info=True) + return False + +##############################################################################--- FIN DEL INICIO DE LAS BASES DE DATOS --- ################################################################################################################################ +########################################################## -- INICIO DE GESTION DE USUARIOS ---########################################################## +def create_user(username, password, role): + global user_container + try: + print(f"Attempting to create user: {username} with role: {role}") + if user_container is None: + print("Error: user_container is None. Attempting to reinitialize connection.") + if not initialize_cosmos_sql_connection(): + raise Exception("Failed to initialize SQL connection") + + hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8') + print(f"Password hashed successfully for user: {username}") + user_data = { + 'id': username, + 'password': hashed_password, + 'role': role, + 'created_at': datetime.utcnow().isoformat() + } + user_container.create_item(body=user_data) + print(f"Usuario {role} creado: {username}") # Log para depuración + return True + except Exception as e: + print(f"Detailed error in create_user: {str(e)}") + return False + +####################################################################################################### +def create_admin_user(username, password): + return create_user(username, password, 'Administrador') + +####################################################################################################### +def create_student_user(username, password): + return create_user(username, password, 'Estudiante') + +####################################################################################################### +# Funciones para Cosmos DB SQL API (manejo de usuarios) +def get_user(username): + try: + query = f"SELECT * FROM c WHERE c.id = '{username}'" + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + user = items[0] if items else None + if user: + print(f"Usuario encontrado: {username}, Rol: {user.get('role')}") # Log añadido + else: + print(f"Usuario no encontrado: {username}") # Log añadido + return user + except Exception as e: + print(f"Error al obtener usuario {username}: {str(e)}") + return None + +########################################################## -- FIN DE GESTION DE USUARIOS ---########################################################## + +########################################################## -- INICIO GESTION DE ARCHIVOS ---########################################################## + +def store_file_contents(username, file_name, file_contents, analysis_type): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + document = { + 'id': f"{username}_{analysis_type}_{file_name}", + 'username': username, + 'file_name': file_name, + 'analysis_type': analysis_type, + 'file_contents': file_contents, + 'timestamp':datetime.now(timezone.utc).isoformat(), + } + user_container.upsert_item(body=document) + logger.info(f"Contenido del archivo guardado para el usuario: {username}, tipo de análisis: {analysis_type}") + return True + except Exception as e: + logger.error(f"Error al guardar el contenido del archivo para el usuario {username}: {str(e)}") + return False + +def retrieve_file_contents(username, file_name, 
analysis_type): + print(f"Attempting to retrieve file: {file_name} for user: {username}") + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return None + try: + query = f"SELECT * FROM c WHERE c.id = '{username}_{analysis_type}_{file_name}'" + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + if items: + return items[0]['file_contents'] + else: + logger.info(f"No se encontró contenido de archivo para el usuario: {username}, tipo de análisis: {analysis_type}") + return None + except Exception as e: + logger.error(f"Error al recuperar el contenido del archivo para el usuario {username}: {str(e)}") + return None + +def get_user_files(username, analysis_type=None): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return [] + try: + if analysis_type: + query = f"SELECT c.file_name, c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}' AND c.analysis_type = '{analysis_type}'" + else: + query = f"SELECT c.file_name, c.analysis_type, c.timestamp FROM c WHERE c.username = '{username}'" + + items = list(user_container.query_items(query=query, enable_cross_partition_query=True)) + return items + except Exception as e: + logger.error(f"Error al obtener la lista de archivos del usuario {username}: {str(e)}") + return [] + +def delete_file(username, file_name, analysis_type): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + user_container.delete_item(item=f"{username}_{analysis_type}_{file_name}", partition_key=username) + logger.info(f"Archivo eliminado para el usuario: {username}, tipo de análisis: {analysis_type}") + return True + except Exception as e: + logger.error(f"Error al eliminar el archivo para el usuario {username}: {str(e)}") + return False + +########################################################## -- FIN GESTION DE ARCHIVOS ---########################################################## + +########################################################## -- INICIO GESTION DE FORMULARIOS ---########################################################## +def store_application_request(name, email, institution, role, reason): + global application_requests_container + logger.info("Entering store_application_request function") + try: + logger.info("Checking application_requests_container") + if application_requests_container is None: + logger.error("application_requests_container is not initialized") + return False + + logger.info("Creating application request document") + application_request = { + "id": str(uuid.uuid4()), + "name": name, + "email": email, + "institution": institution, + "role": role, + "reason": reason, + "requestDate": datetime.utcnow().isoformat() + } + + logger.info(f"Attempting to store document: {application_request}") + application_requests_container.create_item(body=application_request) + logger.info(f"Application request stored for email: {email}") + return True + except Exception as e: + logger.error(f"Error storing application request: {str(e)}") + return False + +####################################################################################################### +def store_user_feedback(username, name, email, feedback): + global user_feedback_container + logger.info(f"Attempting to store user feedback for user: {username}") + try: + if user_feedback_container is None: + logger.error("user_feedback_container is not initialized") + return False 
+ + feedback_item = { + "id": str(uuid.uuid4()), + "username": username, + "name": name, + "email": email, + "feedback": feedback, + "timestamp":datetime.now(timezone.utc).isoformat(), + } + + result = user_feedback_container.create_item(body=feedback_item) + logger.info(f"User feedback stored with ID: {result['id']} for user: {username}") + return True + except Exception as e: + logger.error(f"Error storing user feedback for user {username}: {str(e)}") + return False + + +########################################################## -- FIN GESTION DE FORMULARIOS ---########################################################## + +########################################################## -- INICIO ALMACENAMIENTO ANÁLISIS MORFOSINTÁCTICO ---########################################################## + +def store_morphosyntax_result(username, text, repeated_words, arc_diagrams, pos_analysis, morphological_analysis, sentence_structure): + if analysis_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return False + + try: + word_count = {} + for word, color in repeated_words.items(): + category = color # Asumiendo que 'color' es la categoría gramatical + word_count[category] = word_count.get(category, 0) + 1 + + analysis_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + 'text': text, + 'word_count': word_count, + 'arc_diagrams': arc_diagrams, + 'pos_analysis': pos_analysis, + 'morphological_analysis': morphological_analysis, + 'sentence_structure': sentence_structure + } + + result = analysis_collection.insert_one(analysis_document) + logger.info(f"Análisis guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el análisis para el usuario {username}: {str(e)}") + return False + +########################################################## -- FIN ALMACENAMIENTO ANÁLISIS MORFOSINTÁCTICO ---########################################################## + + +##########################################--- INICIO SECCIÓN DEL ANÁLISIS SEMÁNTICO ---############################################### + +def store_file_semantic_contents(username, file_name, file_contents): + if user_container is None: + logger.error("La conexión a Cosmos DB SQL API no está inicializada") + return False + try: + document = { + 'id': f"{username}_semantic_{file_name}", + 'username': username, + 'file_name': file_name, + 'file_contents': file_contents, + 'analysis_type': 'semantic', + 'timestamp':datetime.now(timezone.utc).isoformat(), + } + user_container.upsert_item(body=document) + logger.info(f"Contenido del archivo semántico guardado para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al guardar el contenido del archivo semántico para el usuario {username}: {str(e)}") + return False + +def store_semantic_result(username, text, analysis_result): + if analysis_collection is None: + print("La conexión a MongoDB no está inicializada") + return False + try: + # Convertir los conceptos clave a una lista de tuplas + key_concepts = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts']] + + # Convertir los gráficos a imágenes base64 + graphs = {} + for graph_name in ['relations_graph', 'entity_graph', 'topic_graph']: + if graph_name in analysis_result: + buf = BytesIO() + analysis_result[graph_name].savefig(buf, format='png') + buf.seek(0) + graphs[graph_name] = base64.b64encode(buf.getvalue()).decode('utf-8') + + 
analysis_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + 'text': text, + 'key_concepts': key_concepts, + 'graphs': graphs, + 'summary': analysis_result.get('summary', ''), + 'entities': analysis_result.get('entities', {}), + 'sentiment': analysis_result.get('sentiment', ''), + 'topics': analysis_result.get('topics', []), + 'analysis_type': 'semantic' + } + + result = analysis_collection.insert_one(analysis_document) + print(f"Análisis semántico guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + print(f"Error al guardar el análisis semántico para el usuario {username}: {str(e)}") + return False + +##########################################--- FIN DE LA SECCIÓN DEL ANÁLISIS SEMÁNTICO ---############################################### + +############################################--- INICIO DE LA SECCIÓN DEL ANÁLISIS DEL DISCURSO ################################################################### + +def store_discourse_analysis_result(username, text1, text2, analysis_result): + if analysis_collection is None: + print("La conexión a MongoDB no está inicializada") + return False + + try: + # Convertir los grafos individuales a imágenes base64 + buf1 = BytesIO() + analysis_result['graph1'].savefig(buf1, format='png') + buf1.seek(0) + img_str1 = base64.b64encode(buf1.getvalue()).decode('utf-8') + + buf2 = BytesIO() + analysis_result['graph2'].savefig(buf2, format='png') + buf2.seek(0) + img_str2 = base64.b64encode(buf2.getvalue()).decode('utf-8') + + # Crear una imagen combinada + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) + ax1.imshow(plt.imread(BytesIO(base64.b64decode(img_str1)))) + ax1.axis('off') + ax1.set_title("Documento 1: Relaciones Conceptuales") + ax2.imshow(plt.imread(BytesIO(base64.b64decode(img_str2)))) + ax2.axis('off') + ax2.set_title("Documento 2: Relaciones Conceptuales") + + buf_combined = BytesIO() + fig.savefig(buf_combined, format='png') + buf_combined.seek(0) + img_str_combined = base64.b64encode(buf_combined.getvalue()).decode('utf-8') + plt.close(fig) + + # Convertir los conceptos clave a listas de tuplas + key_concepts1 = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts1']] + key_concepts2 = [(concept, float(frequency)) for concept, frequency in analysis_result['key_concepts2']] + + # Crear el documento para guardar + analysis_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + #'text1': text1, + #'text2': text2, + 'graph1': img_str1, + 'graph2': img_str2, + 'combined_graph': img_str_combined, + 'key_concepts1': key_concepts1, + 'key_concepts2': key_concepts2, + 'analysis_type': 'discourse' + } + + # Insertar el documento en la base de datos + result = analysis_collection.insert_one(analysis_document) + print(f"Análisis discursivo guardado con ID: {result.inserted_id} para el usuario: {username}") + return True + except Exception as e: + print(f"Error al guardar el análisis discursivo para el usuario {username}: {str(e)}") + print(f"Tipo de excepción: {type(e).__name__}") + print(f"Detalles de la excepción: {e.args}") + return False + +############################################--- FIN DE LA SECCIÓN DEL ANÁLISIS DEL DISCURSO ################################################################### + + +################################################-- INICIO DE LA SECCIÓN DEL CHATBOT --- ############################################################### +def store_chat_history(username, 
messages): + try: + logger.info(f"Attempting to save chat history for user: {username}") + logger.debug(f"Messages to save: {messages}") + + chat_document = { + 'username': username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + 'messages': messages + } + result = chat_collection.insert_one(chat_document) + logger.info(f"Chat history saved with ID: {result.inserted_id} for user: {username}") + logger.debug(f"Chat content: {messages}") + return True + except Exception as e: + logger.error(f"Error saving chat history for user {username}: {str(e)}") + return False + +####################################################################################################### +def export_analysis_and_chat(username, analysis_data, chat_data): + try: + export_data = { + "username": username, + 'timestamp':datetime.now(timezone.utc).isoformat(), + "analysis": analysis_data, + "chat": chat_data + } + + # Aquí puedes decidir cómo quieres exportar los datos + # Por ejemplo, podrías guardarlos en una nueva colección en MongoDB + export_collection = mongo_db['exports'] + result = export_collection.insert_one(export_data) + + # También podrías generar un archivo JSON o CSV y guardarlo en Azure Blob Storage + + return True + except Exception as e: + logger.error(f"Error al exportar análisis y chat para {username}: {str(e)}") + return False + +################################################-- FIN DE LA SECCIÓN DEL CHATBOT --- ############################################################### + +####################################################################################################################################################### + +def get_student_data(username): + if analysis_collection is None or chat_collection is None: + logger.error("La conexión a MongoDB no está inicializada") + return None + formatted_data = { + "username": username, + "entries": [], + "entries_count": 0, + "word_count": {}, + "semantic_analyses": [], + "discourse_analyses": [], + "chat_history": [] + } + try: + logger.info(f"Buscando datos de análisis para el usuario: {username}") + cursor = analysis_collection.find({"username": username}) + + for entry in cursor: + formatted_entry = { + "timestamp": entry.get("timestamp", datetime.now(timezone.utc).isoformat()), + "analysis_type": entry.get("analysis_type", "morphosyntax") + } + + if formatted_entry["analysis_type"] == "morphosyntax": + formatted_entry.update({ + "text": entry.get("text", ""), + "word_count": entry.get("word_count", {}), + "arc_diagrams": entry.get("arc_diagrams", []) + }) + for category, count in formatted_entry["word_count"].items(): + formatted_data["word_count"][category] = formatted_data["word_count"].get(category, 0) + count + + elif formatted_entry["analysis_type"] == "semantic": + formatted_entry.update({ + "key_concepts": entry.get("key_concepts", []), + "graph": entry.get("graph", "") + }) + formatted_data["semantic_analyses"].append(formatted_entry) + + elif formatted_entry["analysis_type"] == "discourse": + formatted_entry.update({ + "text1": entry.get("text1", ""), + "text2": entry.get("text2", ""), + "key_concepts1": entry.get("key_concepts1", []), + "key_concepts2": entry.get("key_concepts2", []), + "graph1": entry.get("graph1", ""), + "graph2": entry.get("graph2", ""), + "combined_graph": entry.get("combined_graph", "") + }) + formatted_data["discourse_analyses"].append(formatted_entry) + + formatted_data["entries"].append(formatted_entry) + + formatted_data["entries_count"] = len(formatted_data["entries"]) + 
for entry in formatted_data["entries"]:
+            if hasattr(entry["timestamp"], "isoformat"):
+                entry["timestamp"] = entry["timestamp"].isoformat()
+
+        formatted_data["entries"].sort(key=lambda x: x["timestamp"], reverse=True)
+
+    except Exception as e:
+        logger.error(f"Error al obtener datos de análisis del estudiante {username}: {str(e)}")
+
+    try:
+        logger.info(f"Buscando historial de chat para el usuario: {username}")
+        chat_cursor = chat_collection.find({"username": username})
+        for chat in chat_cursor:
+            ts = chat["timestamp"]
+            formatted_chat = {
+                "timestamp": ts.isoformat() if hasattr(ts, "isoformat") else ts,
+                "messages": chat["messages"]
+            }
+            formatted_data["chat_history"].append(formatted_chat)
+
+        formatted_data["chat_history"].sort(key=lambda x: x["timestamp"], reverse=True)
+
+    except Exception as e:
+        logger.error(f"Error al obtener historial de chat del estudiante {username}: {str(e)}")
+    logger.info(f"Datos formateados para {username}: {formatted_data}")
+    return formatted_data
+
+################################################################
+def get_user_analysis_summary(username):
+    if analysis_collection is None:
+        logger.error("La conexión a MongoDB no está inicializada")
+        return []
+    try:
+        summary = analysis_collection.aggregate([
+            {"$match": {"username": username}},
+            {"$group": {
+                "_id": "$analysis_type",
+                "count": {"$sum": 1},
+                "last_analysis": {"$max": "$timestamp"}
+            }}
+        ])
+        return list(summary)
+    except Exception as e:
+        logger.error(f"Error al obtener el resumen de análisis para el usuario {username}: {str(e)}")
+        return []
+
+#######################################################################
+def get_user_recent_chats(username, limit=5):
+    if chat_collection is None:
+        logger.error("La conexión a MongoDB no está inicializada")
+        return []
+    try:
+        recent_chats = chat_collection.find(
+            {"username": username},
+            {"messages": {"$slice": -5}}
+        ).sort("timestamp", -1).limit(limit)
+        return list(recent_chats)
+    except Exception as e:
+        logger.error(f"Error al obtener chats recientes para el usuario {username}: {str(e)}")
+        return []
+
+#################################################
+def get_user_analysis_details(username, analysis_type, skip=0, limit=10):
+    if analysis_collection is None:
+        logger.error("La conexión a MongoDB no está inicializada")
+        return []
+    try:
+        details = analysis_collection.find(
+            {"username": username, "analysis_type": analysis_type}
+        ).sort("timestamp", -1).skip(skip).limit(limit)
+        return list(details)
+    except Exception as e:
+        logger.error(f"Error al obtener detalles de análisis para el usuario {username}: {str(e)}")
+        return []
diff --git a/modules/database/chat_db.py b/modules/database/chat_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..16c10963237ffbfc116df440ea87d187136b275a
--- /dev/null
+++ b/modules/database/chat_db.py
@@ -0,0 +1,45 @@
+#/modules/database/chat_db.py
+from .mongo_db import insert_document, find_documents
+from datetime import datetime, timezone
+import logging
+from .database_init import get_mongodb  # Asegúrate de que esta importación esté al principio del archivo
+
+logger = logging.getLogger(__name__)
+
+COLLECTION_NAME = 'chat_history-v3'
+
+def store_chat_history(username, messages, analysis_type):
+    chat_document = {
+        'username': username,
+        'timestamp': datetime.now(timezone.utc).isoformat(),
+        'messages': messages,
+        'analysis_type': analysis_type
+    }
+
+    result = insert_document(COLLECTION_NAME, chat_document)
+    if result:
+        logger.info(f"Historial de chat guardado con ID: {result} para el usuario: {username}")
+        return True
+    return False
+
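+# A minimal usage sketch (hypothetical, not part of this module): persisting the
+# current Streamlit chat session. It assumes st.session_state.username is set and
+# st.session_state.messages is a list of {'role': ..., 'content': ...} dicts, the
+# shape store_chat_history expects:
+#
+#     import streamlit as st
+#
+#     if st.button("Guardar conversación"):
+#         saved = store_chat_history(st.session_state.username,
+#                                    st.session_state.messages,
+#                                    analysis_type='morphosyntax')
+#         st.success("Historial guardado") if saved else st.error("No se pudo guardar")
+
+def 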
get_chat_history(username, analysis_type, limit=None): + query = {"username": username} + if analysis_type: + query["analysis_type"] = analysis_type + + db = get_mongodb() + collection = db['chat_history-v3'] + cursor = collection.find(query).sort("timestamp", -1) + if limit: + cursor = cursor.limit(limit) + + return list(cursor) + +#def get_chat_history(username, analysis_type=None, limit=10): +# query = {"username": username} +# if analysis_type: +# query["analysis_type"] = analysis_type + +# return find_documents(COLLECTION_NAME, query, sort=[("timestamp", -1)], limit=limit) + +# Agregar funciones para actualizar y eliminar chat si es necesario \ No newline at end of file diff --git a/modules/database/database_init.py b/modules/database/database_init.py new file mode 100644 index 0000000000000000000000000000000000000000..35824b6847f6cb0436c3e4fa148a0c28f24f28ae --- /dev/null +++ b/modules/database/database_init.py @@ -0,0 +1,80 @@ +import os +import logging +from azure.cosmos import CosmosClient +from pymongo import MongoClient +import certifi + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Variables globales para Cosmos DB SQL API +cosmos_client = None +user_database = None +user_container = None +application_requests_container = None +user_feedback_container = None + +# Variables globales para Cosmos DB MongoDB API +mongo_client = None +mongo_db = None + +def initialize_cosmos_sql_connection(): + global cosmos_client, user_database, user_container, application_requests_container, user_feedback_container + try: + cosmos_endpoint = os.environ.get("COSMOS_ENDPOINT") + cosmos_key = os.environ.get("COSMOS_KEY") + + if not cosmos_endpoint or not cosmos_key: + raise ValueError("COSMOS_ENDPOINT and COSMOS_KEY environment variables must be set") + + cosmos_client = CosmosClient(cosmos_endpoint, cosmos_key) + user_database = cosmos_client.get_database_client("user_database") + user_container = user_database.get_container_client("users") + application_requests_container = user_database.get_container_client("application_requests") + user_feedback_container = user_database.get_container_client("user_feedback") + + logger.info("Conexión a Cosmos DB SQL API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB SQL API: {str(e)}", exc_info=True) + return False + +def initialize_mongodb_connection(): + global mongo_client, mongo_db + try: + cosmos_mongodb_connection_string = os.getenv("MONGODB_CONNECTION_STRING") + if not cosmos_mongodb_connection_string: + raise ValueError("MONGODB_CONNECTION_STRING environment variable is not set") + + mongo_client = MongoClient(cosmos_mongodb_connection_string, + tls=True, + tlsCAFile=certifi.where(), + retryWrites=False, + serverSelectionTimeoutMS=5000, + connectTimeoutMS=10000, + socketTimeoutMS=10000) + + mongo_client.admin.command('ping') + + mongo_db = mongo_client['aideatext_db'] + + logger.info("Conexión a Cosmos DB MongoDB API exitosa") + return True + except Exception as e: + logger.error(f"Error al conectar con Cosmos DB MongoDB API: {str(e)}", exc_info=True) + return False + +def initialize_database_connections(): + sql_success = initialize_cosmos_sql_connection() + mongodb_success = initialize_mongodb_connection() + return sql_success and mongodb_success + +def get_sql_containers(): + if user_container is None or application_requests_container is None or user_feedback_container is None: + initialize_cosmos_sql_connection() + return user_container, 
application_requests_container, user_feedback_container + +def get_mongodb(): + if mongo_db is None: + initialize_mongodb_connection() + return mongo_db \ No newline at end of file diff --git a/modules/database/mongo_db.py b/modules/database/mongo_db.py new file mode 100644 index 0000000000000000000000000000000000000000..c9e3a02096f9dcd44d0e1b262a43a7a155ef183e --- /dev/null +++ b/modules/database/mongo_db.py @@ -0,0 +1,51 @@ +from .database_init import get_mongodb +import logging + +logger = logging.getLogger(__name__) + +def get_collection(collection_name): + db = get_mongodb() + return db[collection_name] + +def insert_document(collection_name, document): + collection = get_collection(collection_name) + try: + result = collection.insert_one(document) + logger.info(f"Documento insertado en {collection_name} con ID: {result.inserted_id}") + return result.inserted_id + except Exception as e: + logger.error(f"Error al insertar documento en {collection_name}: {str(e)}") + return None + +def find_documents(collection_name, query, sort=None, limit=None): + collection = get_collection(collection_name) + try: + cursor = collection.find(query) + if sort: + cursor = cursor.sort(sort) + if limit: + cursor = cursor.limit(limit) + return list(cursor) + except Exception as e: + logger.error(f"Error al buscar documentos en {collection_name}: {str(e)}") + return [] + +def update_document(collection_name, query, update): + collection = get_collection(collection_name) + try: + result = collection.update_one(query, update) + logger.info(f"Documento actualizado en {collection_name}: {result.modified_count} modificado(s)") + return result.modified_count + except Exception as e: + logger.error(f"Error al actualizar documento en {collection_name}: {str(e)}") + return 0 + +def delete_document(collection_name, query): + collection = get_collection(collection_name) + try: + result = collection.delete_one(query) + logger.info(f"Documento eliminado de {collection_name}: {result.deleted_count} eliminado(s)") + return result.deleted_count + except Exception as e: + logger.error(f"Error al eliminar documento de {collection_name}: {str(e)}") + return 0 \ No newline at end of file diff --git a/modules/database/morphosintax_mongo_db.py b/modules/database/morphosintax_mongo_db.py new file mode 100644 index 0000000000000000000000000000000000000000..0b28bfc0faad356f8ef1bd651e5d48187f1575dd --- /dev/null +++ b/modules/database/morphosintax_mongo_db.py @@ -0,0 +1,49 @@ +#/modules/database/morphosintax_mongo_db.py +from .mongo_db import insert_document, find_documents, update_document, delete_document +from ..utils.svg_to_png_converter import process_and_save_svg_diagrams +from datetime import datetime, timezone +import logging + +logger = logging.getLogger(__name__) + +COLLECTION_NAME = 'student_morphosyntax_analysis' + +def store_student_morphosyntax_result(username, text, arc_diagrams): + analysis_document = { + 'username': username, + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'text': text, + 'arc_diagrams': arc_diagrams, + 'analysis_type': 'morphosyntax' + } + + result = insert_document(COLLECTION_NAME, analysis_document) + if result: + # Procesar y guardar los diagramas SVG como PNG + png_ids = process_and_save_svg_diagrams(username, str(result), arc_diagrams) + + # Actualizar el documento con los IDs de los PNGs + update_document(COLLECTION_NAME, {'_id': result}, {'$set': {'png_diagram_ids': png_ids}}) + + logger.info(f"Análisis morfosintáctico del estudiante guardado con ID: {result} para el usuario: 
{username}")
+        return True
+    return False
+
+def get_student_morphosyntax_analysis(username, limit=10):
+    query = {"username": username, "analysis_type": "morphosyntax"}
+    return find_documents(COLLECTION_NAME, query, sort=[("timestamp", -1)], limit=limit)
+
+def update_student_morphosyntax_analysis(analysis_id, update_data):
+    query = {"_id": analysis_id}
+    update = {"$set": update_data}
+    return update_document(COLLECTION_NAME, query, update)
+
+def delete_student_morphosyntax_analysis(analysis_id):
+    query = {"_id": analysis_id}
+    return delete_document(COLLECTION_NAME, query)
+
+def get_student_morphosyntax_data(username):
+    analyses = get_student_morphosyntax_analysis(username, limit=None)  # Obtener todos los análisis
+    return {
+        'entries': analyses
+    }
\ No newline at end of file
diff --git a/modules/database/morphosintaxis_export.py b/modules/database/morphosintaxis_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cfb33d8dc0d6497597c8545de147fcbf4752c22
--- /dev/null
+++ b/modules/database/morphosintaxis_export.py
@@ -0,0 +1,78 @@
+from io import BytesIO
+from reportlab.lib import colors
+from reportlab.lib.pagesizes import letter
+from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, PageBreak
+from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
+from reportlab.lib.units import cm
+from svglib.svglib import svg2rlg
+from reportlab.graphics import renderPM
+import base64
+import cairosvg
+from reportlab.graphics import renderPDF
+from reportlab.lib.utils import ImageReader
+
+#importaciones locales
+from .morphosintax_mongo_db import get_student_morphosyntax_data
+from .chat_db import get_chat_history
+
+# Placeholder para el logo
+LOGO_PATH = "assets/img/logo_92x92.png"  # Reemplaza esto con la ruta real de tu logo
+
+# Definir el tamaño de página carta manualmente (612 x 792 puntos)
+LETTER_SIZE = (612, 792)
+
+def add_logo(canvas, doc):
+    logo = Image(LOGO_PATH, width=2*cm, height=2*cm)
+    logo.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - 0.5*cm)
+
+def export_user_interactions(username, analysis_type):
+    # Obtener historial de chat (que ahora incluye los análisis morfosintácticos)
+    chat_history = get_chat_history(username, analysis_type)
+
+    # Crear un PDF
+    buffer = BytesIO()
+    doc = SimpleDocTemplate(
+        buffer,
+        pagesize=letter,
+        rightMargin=2*cm,
+        leftMargin=2*cm,
+        topMargin=2*cm,
+        bottomMargin=2*cm
+    )
+
+    story = []
+    styles = getSampleStyleSheet()
+
+    # Título
+    story.append(Paragraph(f"Interacciones de {username} - Análisis {analysis_type}", styles['Title']))
+    story.append(Spacer(1, 0.5*cm))
+
+    # Historial del chat y análisis
+    for entry in chat_history:
+        for message in entry['messages']:
+            role = message['role']
+            content = message['content']
+            story.append(Paragraph(f"{role.capitalize()}: {content}", styles['BodyText']))
+            story.append(Spacer(1, 0.25*cm))
+
+            # Si hay visualizaciones (diagramas SVG), convertirlas a imagen y añadirlas
+            if 'visualizations' in message and message['visualizations']:
+                for svg in message['visualizations']:
+                    drawing = svg2rlg(BytesIO(svg.encode('utf-8')))
+                    img_data = BytesIO()
+                    renderPM.drawToFile(drawing, img_data, fmt="PNG")
+                    img_data.seek(0)
+                    img = Image(img_data, width=15*cm, height=7.5*cm)
+                    story.append(img)
+                    story.append(Spacer(1, 0.5*cm))
+
+        story.append(PageBreak())
+
+    # Construir el PDF
+    doc.build(story)
+    buffer.seek(0)
+    return buffer
+
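+# Note: add_logo above is defined but never attached to the document, so the PDF is
+# built without the logo. A minimal sketch (an assumption on how it could be wired
+# in, not something this module does as written) using ReportLab's page callbacks,
+# whose (canvas, doc) signature add_logo already matches:
+#
+#     doc.build(story, onFirstPage=add_logo, onLaterPages=add_logo)
+
+# Uso en Streamlit:
+# pdf_buffer = export_user_interactions(username, 'morphosyntax')
+# st.download_button(label="Descargar PDF", data=pdf_buffer, file_name="interacciones.pdf", mime="application/pdf")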
st.download_button(label="Descargar PDF", data=pdf_buffer, file_name="interacciones.pdf", mime="application/pdf") \ No newline at end of file diff --git a/modules/database/morphosintaxis_export_v1.py b/modules/database/morphosintaxis_export_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2e67742b4f9fdae6476882757ee8e187639ca2 --- /dev/null +++ b/modules/database/morphosintaxis_export_v1.py @@ -0,0 +1,97 @@ +# database_export.py + +import pandas as pd +import matplotlib.pyplot as plt +from io import BytesIO +#importaciones locales +from .morphosintax_mongo_db import get_student_morphosyntax_analysis +from .chat_db import get_chat_history + + +def export_user_interactions(username, analysis_type): + # Obtener historial de chat (que ahora incluye los análisis morfosintácticos) + chat_history = get_chat_history(username, analysis_type) + + # Crear un PDF + buffer = BytesIO() + doc = SimpleDocTemplate( + buffer, + pagesize=letter, + rightMargin=2*cm, + leftMargin=2*cm, + topMargin=2*cm, + bottomMargin=2*cm + ) + + story = [] + styles = getSampleStyleSheet() + + # Título + story.append(Paragraph(f"Interacciones de {username} - Análisis {analysis_type}", styles['Title'])) + story.append(Spacer(1, 0.5*cm)) + + # Historial del chat y análisis + for entry in chat_history: + for message in entry['messages']: + role = message['role'] + content = message['content'] + story.append(Paragraph(f"{role.capitalize()}: {content}", styles['BodyText'])) + story.append(Spacer(1, 0.25*cm)) + + # Si hay visualizaciones (diagramas SVG), convertirlas a imagen y añadirlas + if 'visualizations' in message and message['visualizations']: + for svg in message['visualizations']: + drawing = svg2rlg(BytesIO(svg.encode('utf-8'))) + img_data = BytesIO() + renderPM.drawToFile(drawing, img_data, fmt="PNG") + img_data.seek(0) + img = Image(img_data, width=15*cm, height=7.5*cm) + story.append(img) + story.append(Spacer(1, 0.5*cm)) + + story.append(PageBreak()) + + # Construir el PDF + doc.build(story) + buffer.seek(0) + return buffer + +#def export_user_interactions(username, analysis_type): + # Obtener análisis morfosintáctico + #morphosyntax_data = get_student_morphosyntax_analysis(username) + + # Obtener historial de chat + #chat_history = get_chat_history(username, analysis_type) + + # Crear un DataFrame con los datos + #df = pd.DataFrame({ + # 'Timestamp': [entry['timestamp'] for entry in chat_history], + # 'Role': [msg['role'] for entry in chat_history for msg in entry['messages']], + # 'Content': [msg['content'] for entry in chat_history for msg in entry['messages']] + #}) + + # Crear un PDF + #buffer = BytesIO() + #plt.figure(figsize=(12, 6)) + #plt.axis('off') + #plt.text(0.5, 0.98, f"Interacciones de {username} - Análisis {analysis_type}", ha='center', va='top', fontsize=16) + #plt.text(0.5, 0.95, f"Total de interacciones: {len(df)}", ha='center', va='top', fontsize=12) + + # Añadir tabla con las interacciones + #plt.table(cellText=df.values, colLabels=df.columns, cellLoc='center', loc='center') + + # Añadir diagramas de arco si es análisis morfosintáctico + #if analysis_type == 'morphosyntax' and morphosyntax_data: + # for i, analysis in enumerate(morphosyntax_data): + # plt.figure(figsize=(12, 6)) + # plt.axis('off') + # plt.text(0.5, 0.98, f"Diagrama de Arco {i+1}", ha='center', va='top', fontsize=16) + # plt.imshow(analysis['arc_diagrams'][0]) # Asumiendo que arc_diagrams es una lista de imágenes + + #plt.savefig(buffer, format='pdf', bbox_inches='tight') + #buffer.seek(0) + #return 
buffer
+
+# Uso:
+# pdf_buffer = export_user_interactions(username, 'morphosyntax')
+# st.download_button(label="Descargar PDF", data=pdf_buffer, file_name="interacciones.pdf", mime="application/pdf")
\ No newline at end of file
diff --git a/modules/database/sql_db.py b/modules/database/sql_db.py new file mode 100644 index 0000000000000000000000000000000000000000..7c4802252f0fc18082a4ec1126072869252bef09 --- /dev/null +++ b/modules/database/sql_db.py @@ -0,0 +1,129 @@
+from .database_init import get_sql_containers
+from datetime import datetime, timezone
+import logging
+import bcrypt
+import uuid
+
+logger = logging.getLogger(__name__)
+
+def get_user(username, role=None):
+    user_container, _, _ = get_sql_containers()
+    try:
+        # Consulta parametrizada: interpolar username/role directamente en el SQL permitiría inyección
+        query = "SELECT * FROM c WHERE c.id = @username"
+        parameters = [{"name": "@username", "value": username}]
+        if role:
+            query += " AND c.role = @role"
+            parameters.append({"name": "@role", "value": role})
+        items = list(user_container.query_items(query=query, parameters=parameters, enable_cross_partition_query=True))
+        return items[0] if items else None
+    except Exception as e:
+        logger.error(f"Error al obtener usuario {username}: {str(e)}")
+        return None
+
+def get_admin_user(username):
+    return get_user(username, role='Administrador')
+
+def get_student_user(username):
+    return get_user(username, role='Estudiante')
+
+def get_teacher_user(username):
+    return get_user(username, role='Profesor')
+
+def create_user(username, password, role, additional_info=None):
+    user_container, _, _ = get_sql_containers()
+    try:
+        hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')
+        user_data = {
+            'id': username,
+            'password': hashed_password,
+            'role': role,
+            'timestamp': datetime.now(timezone.utc).isoformat(),
+            'additional_info': additional_info or {}
+        }
+        user_container.create_item(body=user_data)
+        logger.info(f"Usuario {role} creado: {username}")
+        return True
+    except Exception as e:
+        logger.error(f"Error al crear usuario {role}: {str(e)}")
+        return False
+
+def create_student_user(username, password, additional_info=None):
+    return create_user(username, password, 'Estudiante', additional_info)
+
+def create_teacher_user(username, password, additional_info=None):
+    return create_user(username, password, 'Profesor', additional_info)
+
+def create_admin_user(username, password, additional_info=None):
+    return create_user(username, password, 'Administrador', additional_info)
+
+
+###########################################################
+def update_student_user(username, new_info):
+    user_container, _, _ = get_sql_containers()
+    try:
+        user = get_student_user(username)
+        if user:
+            user['additional_info'].update(new_info)
+            user_container.upsert_item(body=user)
+            logger.info(f"Información del estudiante actualizada: {username}")
+            return True
+        else:
+            logger.warning(f"Intento de actualizar estudiante no existente: {username}")
+            return False
+    except Exception as e:
+        logger.error(f"Error al actualizar información del estudiante {username}: {str(e)}")
+        return False
+
+
+def delete_student_user(username):
+    user_container, _, _ = get_sql_containers()
+    try:
+        user = get_student_user(username)
+        if user:
+            user_container.delete_item(item=user['id'], partition_key=username)
+            logger.info(f"Estudiante eliminado: {username}")
+            return True
+        else:
+            logger.warning(f"Intento de eliminar estudiante no existente: {username}")
+            return False
+    except Exception as e:
+        logger.error(f"Error al eliminar estudiante {username}: {str(e)}")
+        return False
+
+def store_application_request(name, lastname, email, institution, current_role, desired_role, reason):
+    _,
application_requests_container, _ = get_sql_containers() + try: + application_request = { + "id": str(uuid.uuid4()), + "name": name, + "lastname": lastname, + "email": email, + "institution": institution, + "current_role": current_role, + "desired_role": desired_role, + "reason": reason, + "requestDate": datetime.utcnow().isoformat() + } + application_requests_container.create_item(body=application_request) + logger.info(f"Solicitud de aplicación almacenada para el email: {email}") + return True + except Exception as e: + logger.error(f"Error al almacenar la solicitud de aplicación: {str(e)}") + return False + +def store_student_feedback(username, name, email, feedback): + _, _, user_feedback_container = get_sql_containers() + try: + feedback_item = { + "id": str(uuid.uuid4()), + "username": username, + "name": name, + "email": email, + "feedback": feedback, + "role": "Estudiante", + 'timestamp': datetime.now(timezone.utc).isoformat(), + } + result = user_feedback_container.create_item(body=feedback_item) + logger.info(f"Feedback de estudiante almacenado con ID: {result['id']} para el usuario: {username}") + return True + except Exception as e: + logger.error(f"Error al almacenar el feedback del estudiante {username}: {str(e)}") + return False \ No newline at end of file diff --git a/modules/database/txt.txt b/modules/database/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/discourse/__init__.py b/modules/discourse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/discourse/__pycache__/__init__.cpython-311.pyc b/modules/discourse/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..531fc4d8fdd23ed8161f6e843ea0c74857a2b978 Binary files /dev/null and b/modules/discourse/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/discourse/__pycache__/discourse_interface.cpython-311.pyc b/modules/discourse/__pycache__/discourse_interface.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e24a65c99cd7c93a342fcf3dba20781483972e33 Binary files /dev/null and b/modules/discourse/__pycache__/discourse_interface.cpython-311.pyc differ diff --git a/modules/discourse/__pycache__/discourse_process.cpython-311.pyc b/modules/discourse/__pycache__/discourse_process.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ed76cf9b49d6541d349438ffe7fdbde63034234 Binary files /dev/null and b/modules/discourse/__pycache__/discourse_process.cpython-311.pyc differ diff --git a/modules/discourse/discourse_interface.py b/modules/discourse/discourse_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..70632ef980b5a2ba9f02b27bd5dc61bd7b255cdc --- /dev/null +++ b/modules/discourse/discourse_interface.py @@ -0,0 +1,125 @@ +import streamlit as st +import pandas as pd +from streamlit_float import * +from .discourse_process import process_discourse_input +from ..chatbot.chatbot import initialize_chatbot +from ..database.database_oldFromV2 import store_discourse_analysis_result +from ..text_analysis.discourse_analysis import perform_discourse_analysis +from ..utils.widget_utils import generate_unique_key + +def display_discourse_interface(lang_code, nlp_models, t): + st.subheader(t['discourse_title']) + + text_input = st.text_area( + t['warning_message'], + height=150, + 
key=generate_unique_key("discourse", "text_area") + ) + + if st.button( + t['results_title'], + key=generate_unique_key("discourse", "analyze_button") + ): + if text_input: + # Aquí iría tu lógica de análisis morfosintáctico + # Por ahora, solo mostraremos un mensaje de placeholder + st.info(t['analysis_placeholder']) + else: + st.warning(t['no_text_warning']) + +''' +def display_discourse_interface(lang_code, nlp_models, t): + st.subheader(t['title']) + + # Inicializar el chatbot si no existe + if 'discourse_chatbot' not in st.session_state: + st.session_state.discourse_chatbot = initialize_chatbot('discourse') + + # Mostrar el historial del chat + chat_history = st.session_state.get('discourse_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualization" in message: + st.pyplot(message["visualization"]) + + # Input del usuario + user_input = st.chat_input(t['discourse_initial_message'], key=generate_unique_key('discourse', st.session_state.username)) + + if user_input: + # Procesar el input del usuario + response, visualization = process_discourse_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents'), t) + + # Actualizar el historial del chat + chat_history.append({"role": "user", "content": user_input}) + chat_history.append({"role": "assistant", "content": response, "visualization": visualization}) + st.session_state.discourse_chat_history = chat_history + + # Mostrar el resultado más reciente + with st.chat_message("assistant"): + st.write(response) + if visualization: + st.pyplot(visualization) + + # Botón para limpiar el historial del chat + if st.button(t['clear_chat'], key=generate_unique_key('discourse', 'clear_chat')): + st.session_state.discourse_chat_history = [] + st.rerun() + + # Sección para cargar archivos + col1, col2 = st.columns(2) + with col1: + uploaded_file1 = st.file_uploader(t['file_uploader1'], type=['txt', 'pdf', 'docx', 'doc', 'odt']) + with col2: + uploaded_file2 = st.file_uploader(t['file_uploader2'], type=['txt', 'pdf', 'docx', 'doc', 'odt']) + + if uploaded_file1 and uploaded_file2: + file_contents1 = uploaded_file1.getvalue().decode('utf-8') + file_contents2 = uploaded_file2.getvalue().decode('utf-8') + st.session_state.file_contents = (file_contents1, file_contents2) + + if st.button(t['analyze_button']): + result = perform_discourse_analysis(file_contents1, file_contents2, nlp_models[lang_code], lang_code) + st.session_state.discourse_result = result + display_discourse_results(result, lang_code, t) + store_discourse_analysis_result(st.session_state.username, file_contents1, file_contents2, result) + +def display_discourse_results(result, lang_code, t): + if result is None: + st.warning(t.get('no_results', "No hay resultados disponibles.")) + return + + col1, col2 = st.columns(2) + + with col1: + with st.expander(t.get('file_uploader1', "Documento 1"), expanded=True): + st.subheader(t.get('key_concepts', "Conceptos Clave")) + if 'key_concepts1' in result: + df1 = pd.DataFrame(result['key_concepts1'], columns=['Concepto', 'Frecuencia']) + df1['Frecuencia'] = df1['Frecuencia'].round(2) + st.table(df1) + + if 'graph1' in result: + st.pyplot(result['graph1']) + + with col2: + with st.expander(t.get('file_uploader2', "Documento 2"), expanded=True): + st.subheader(t.get('key_concepts', "Conceptos Clave")) + if 'key_concepts2' in result: + df2 = pd.DataFrame(result['key_concepts2'], columns=['Concepto', 'Frecuencia']) + df2['Frecuencia'] = 
df2['Frecuencia'].round(2)
+                st.table(df2)
+
+            if 'graph2' in result:
+                st.pyplot(result['graph2'])
+
+    # Relación de conceptos entre ambos documentos (Diagrama de Sankey)
+    st.subheader(t.get('comparison', "Relación de conceptos entre ambos documentos"))
+    if 'key_concepts1' in result and 'key_concepts2' in result:
+        # Código para generar el diagrama de Sankey (como en la función original)
+        pass
+    else:
+        st.warning(t.get('comparison_not_available', "La comparación no está disponible."))
+
+
+    '''
\ No newline at end of file
diff --git a/modules/discourse/discourse_process.py b/modules/discourse/discourse_process.py new file mode 100644 index 0000000000000000000000000000000000000000..0e782df248b816d8f616770ca5202a818fe1d91a --- /dev/null +++ b/modules/discourse/discourse_process.py @@ -0,0 +1,21 @@
+from ..text_analysis.discourse_analysis import perform_discourse_analysis, compare_semantic_analysis
+import streamlit as st
+
+def process_discourse_input(user_input, lang_code, nlp_model, file_contents, t):
+    if user_input.startswith('/analisis_discurso'):
+        if file_contents is None or len(file_contents) != 2:
+            return t['no_files_uploaded'], None
+        text1, text2 = file_contents
+        result = perform_discourse_analysis(text1, text2, nlp_model, lang_code)
+        return t['discourse_analysis_completed'], (result['graph1'], result['graph2'])
+    elif user_input.startswith('/comparar'):
+        if file_contents is None or len(file_contents) != 2:
+            return t['no_files_uploaded'], None
+        text1, text2 = file_contents
+        comparison_result = compare_semantic_analysis(text1, text2, nlp_model, lang_code)
+        return t['comparison_completed'], comparison_result
+    else:
+        # Procesar otros tipos de inputs del usuario
+        chatbot = st.session_state.discourse_chatbot
+        response = chatbot.generate_response(user_input, lang_code, context=str(file_contents))
+        return response, None
\ No newline at end of file
diff --git a/modules/email/__init__.py b/modules/email/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/email/__pycache__/__init__.cpython-311.pyc b/modules/email/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..decd2af6d410fe2fb58d2694805363eb73aacd9d Binary files /dev/null and b/modules/email/__pycache__/__init__.cpython-311.pyc differ
diff --git a/modules/email/__pycache__/email.cpython-311.pyc b/modules/email/__pycache__/email.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..766c7c57fb026d6b10afe9262588d1facb6b17e4 Binary files /dev/null and b/modules/email/__pycache__/email.cpython-311.pyc differ
diff --git a/modules/email/email.py b/modules/email/email.py new file mode 100644 index 0000000000000000000000000000000000000000..878d5c53b1c1c31a51c2d7ec3bb7710784855861 --- /dev/null +++ b/modules/email/email.py @@ -0,0 +1,92 @@
+import smtplib
+import logging
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+import os
+
+logger = logging.getLogger(__name__)
+
+def send_email_notification(name, email, institution, role, reason):
+    sender_email = "noreply@aideatext.ai"  # Configura esto con tu dirección de correo
+    receiver_email = "hello@aideatext.ai"
+    password = os.environ.get("NOREPLY_EMAIL_PASSWORD")  # Configura esto en tus variables de entorno
+
+    message = MIMEMultipart("alternative")
+    message["Subject"] = "Nueva solicitud de prueba de AIdeaText"
+    message["From"] = sender_email
+    message["To"] = receiver_email
+
+    text = f"""\
+    Nueva solicitud de prueba de AIdeaText:
+    Nombre: {name}
+    Email: {email}
+    Institución: {institution}
+    Rol: {role}
+    Razón: {reason}
+    """
+
+    html = f"""\
+    <html>
+      <body>
+        <h2>Nueva solicitud de prueba de AIdeaText</h2>
+        <p><b>Nombre:</b> {name}</p>
+        <p><b>Email:</b> {email}</p>
+        <p><b>Institución:</b> {institution}</p>
+        <p><b>Rol:</b> {role}</p>
+        <p><b>Razón:</b> {reason}</p>
+      </body>
+    </html>
+    """
+
+    part1 = MIMEText(text, "plain")
+    part2 = MIMEText(html, "html")
+
+    message.attach(part1)
+    message.attach(part2)
+
+    try:
+        with smtplib.SMTP_SSL("smtp.titan.email", 465) as server:
+            logger.info("Conectado al servidor SMTP")
+            server.login(sender_email, password)
+            logger.info("Inicio de sesión exitoso")
+            server.sendmail(sender_email, receiver_email, message.as_string())
+            logger.info(f"Correo enviado de {sender_email} a {receiver_email}")
+            logger.info(f"Email notification sent for application request: {email}")
+            return True
+    except Exception as e:
+        logger.error(f"Error sending email notification: {str(e)}")
+        return False
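The senders above rely on MIME multipart/alternative ordering: parts are attached plainest-first, so HTML-capable clients render the last part they understand while text-only clients fall back to the first. A minimal, self-contained sketch of that pattern, with purely illustrative addresses and content:

# Standalone sketch of the multipart/alternative pattern (stdlib only).
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart("alternative")
msg["Subject"] = "Demo"
msg["From"] = "noreply@example.com"
msg["To"] = "dest@example.com"

# Plainest part first: clients that cannot render HTML fall back to it.
msg.attach(MIMEText("Nombre: Ada\n", "plain"))
# Richest part last: HTML-capable clients render this one.
msg.attach(MIMEText("<html><body><p><b>Nombre:</b> Ada</p></body></html>", "html"))

print(msg.as_string())

Note that send_user_feedback_notification below attaches only the HTML part, so text-only clients will see an empty body; attaching a plain-text part first would avoid that.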
+def send_user_feedback_notification(name, email, feedback):
+    sender_email = "noreply@aideatext.ai"
+    receiver_email = "feedback@aideatext.ai"  # Cambia esto a la dirección que desees
+    password = os.environ.get("NOREPLY_EMAIL_PASSWORD")
+
+    message = MIMEMultipart("alternative")
+    message["Subject"] = "Nuevo comentario de usuario en AIdeaText"
+    message["From"] = sender_email
+    message["To"] = receiver_email
+
+    html = f"""\
+    <html>
+      <body>
+        <h2>Nuevo comentario de usuario en AIdeaText</h2>
+        <p><b>Nombre:</b> {name}</p>
+        <p><b>Email:</b> {email}</p>
+        <p><b>Comentario:</b> {feedback}</p>
+      </body>
+    </html>
+ + + """ + + part = MIMEText(html, "html") + message.attach(part) + + try: + with smtplib.SMTP_SSL("smtp.titan.email", 465) as server: + logger.info("Conectado al servidor SMTP") + server.login(sender_email, password) + logger.info("Inicio de sesión exitoso") + server.sendmail(sender_email, receiver_email, message.as_string()) + logger.info(f"Correo enviado de {sender_email} a {receiver_email}") + logger.info(f"Email notification sent for user feedback from: {email}") + return True + except Exception as e: + logger.error(f"Error sending user feedback email notification: {str(e)}") + return False \ No newline at end of file diff --git a/modules/email/txt.txt b/modules/email/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/lost.py b/modules/lost.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5a4fee0812e6dae79d898d23afedd8355961bd --- /dev/null +++ b/modules/lost.py @@ -0,0 +1,104 @@ +import streamlit as st +import pandas as pd +import matplotlib.pyplot as plt +import seaborn as sns +from datetime import datetime, timedelta +import pytz +import logging +from io import BytesIO +from reportlab.pdfgen import canvas +from reportlab.lib.pagesizes import letter +from docx import Document +from odf.opendocument import OpenDocumentText +from odf.text import P + +# Configuración de logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Asumimos que estas funciones están disponibles a través de las importaciones en load_database_functions +from .database.morphosintax_mongo_db import get_student_morphosyntax_analysis, get_student_morphosyntax_data +from .database.chat_db import get_chat_history + +def display_student_progress(username, lang_code, t): + logger.debug(f"Iniciando display_student_progress para {username}") + + st.title(f"{t.get('progress_of', 'Progreso de')} {username}") + + # Obtener datos de análisis morfosintáctico + morphosyntax_data = get_student_morphosyntax_data(username) + # Obtener historial de chat + chat_history = get_chat_history(username, None) + + if not morphosyntax_data and not chat_history: + logger.warning(f"No se encontraron datos para el estudiante {username}") + st.warning(t.get("no_data_warning", "No se encontraron datos para este estudiante.")) + st.info(t.get("try_analysis", "Intenta realizar algunos análisis de texto primero.")) + return + + # Resumen de actividades + with st.expander(t.get("activities_summary", "Resumen de Actividades"), expanded=True): + total_morphosyntax = len(morphosyntax_data) + total_chats = len(chat_history) + st.write(f"{t.get('total_morphosyntax_analyses', 'Total de análisis morfosintácticos')}: {total_morphosyntax}") + st.write(f"{t.get('total_chats', 'Total de conversaciones de chat')}: {total_chats}") + + # Gráfico de tipos de actividades + try: + activity_counts = pd.Series({ + 'Análisis Morfosintáctico': total_morphosyntax, + 'Conversaciones de Chat': total_chats + }) + fig, ax = plt.subplots() + sns.barplot(x=activity_counts.index, y=activity_counts.values, ax=ax) + ax.set_title(t.get("activity_types_chart", "Tipos de actividades realizadas")) + ax.set_ylabel(t.get("count", "Cantidad")) + st.pyplot(fig) + except Exception as e: + logger.error(f"Error al crear el gráfico: {e}") + st.error("No se pudo crear el gráfico de tipos de actividades.") + + # Función para generar el contenido del archivo de actividades de las últimas 48 horas + def generate_activity_content_48h(): + content 
= f"Actividades de {username} en las últimas 48 horas\n\n" + + two_days_ago = datetime.now(pytz.utc) - timedelta(days=2) + + try: + recent_morphosyntax = [a for a in morphosyntax_data if datetime.fromisoformat(a['timestamp']) > two_days_ago] + + content += f"Análisis morfosintácticos: {len(recent_morphosyntax)}\n" + for analysis in recent_morphosyntax: + content += f"- Análisis del {analysis['timestamp']}: {analysis['text'][:50]}...\n" + + recent_chats = [c for c in chat_history if datetime.fromisoformat(c['timestamp']) > two_days_ago] + + content += f"\nConversaciones de chat: {len(recent_chats)}\n" + for chat in recent_chats: + content += f"- Chat del {chat['timestamp']}: {len(chat['messages'])} mensajes\n" + except Exception as e: + logger.error(f"Error al generar el contenido de actividades: {e}") + content += "Error al recuperar los datos de actividades.\n" + + return content + + # Botones para descargar el histórico de actividades de las últimas 48 horas + st.subheader(t.get("download_history_48h", "Descargar Histórico de Actividades (Últimas 48 horas)")) + if st.button("Generar reporte de 48 horas"): + try: + report_content = generate_activity_content_48h() + st.text_area("Reporte de 48 horas", report_content, height=300) + st.download_button( + label="Descargar TXT (48h)", + data=report_content, + file_name="actividades_48h.txt", + mime="text/plain" + ) + except Exception as e: + logger.error(f"Error al generar el reporte: {e}") + st.error("No se pudo generar el reporte. Por favor, verifica los logs para más detalles.") + + logger.debug("Finalizando display_student_progress") + +# Funciones auxiliares para generar diferentes formatos de archivo (PDF, DOCX, ODT) se mantienen igual +# ... \ No newline at end of file diff --git a/modules/morphosyntax/__init__.py b/modules/morphosyntax/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/morphosyntax/__pycache__/__init__.cpython-311.pyc b/modules/morphosyntax/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1de440bb8567809c523502b8eb53e8cbc879140 Binary files /dev/null and b/modules/morphosyntax/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/morphosyntax/__pycache__/morphosyntax_interface.cpython-311.pyc b/modules/morphosyntax/__pycache__/morphosyntax_interface.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31626a87d30b5ef698ab28d4e18280c2ab3b2878 Binary files /dev/null and b/modules/morphosyntax/__pycache__/morphosyntax_interface.cpython-311.pyc differ diff --git a/modules/morphosyntax/__pycache__/morphosyntax_process.cpython-311.pyc b/modules/morphosyntax/__pycache__/morphosyntax_process.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46b24c65f61758eaa702d506363ec7f9d05ceeaa Binary files /dev/null and b/modules/morphosyntax/__pycache__/morphosyntax_process.cpython-311.pyc differ diff --git a/modules/morphosyntax/morphosyntax_interface-Back1910-25-9-24.py b/modules/morphosyntax/morphosyntax_interface-Back1910-25-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..e8bb9d5dda50e6e595246de2a292bf0e3fa4536d --- /dev/null +++ b/modules/morphosyntax/morphosyntax_interface-Back1910-25-9-24.py @@ -0,0 +1,171 @@ +#modules/morphosyntax/morphosyntax_interface.py +import streamlit as st +from streamlit_float import * +from streamlit_antd_components import * +from 
streamlit.components.v1 import html +import base64 +from .morphosyntax_process import process_morphosyntactic_input +from ..chatbot.chatbot import initialize_chatbot +from ..utils.widget_utils import generate_unique_key +from ..database.database_oldFromV2 import store_morphosyntax_result + +import logging +logger = logging.getLogger(__name__) + + +####################### VERSION ANTERIOR A LAS 20:00 24-9-24 + +def display_morphosyntax_interface(lang_code, nlp_models, t): + # Estilo CSS personalizado + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+    <div>
+        {t['morpho_initial_message']}
+    </div>
+ """, unsafe_allow_html=True) + + # Inicializar el chatbot si no existe + if 'morphosyntax_chatbot' not in st.session_state: + st.session_state.morphosyntax_chatbot = initialize_chatbot('morphosyntactic') + + # Crear un contenedor para el chat + chat_container = st.container() + + # Mostrar el historial del chat + with chat_container: + if 'morphosyntax_chat_history' not in st.session_state: + st.session_state.morphosyntax_chat_history = [] + for i, message in enumerate(st.session_state.morphosyntax_chat_history): + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualizations" in message: + for viz in message["visualizations"]: + st.components.v1.html( + f""" +
+        <div>
+            {viz}
+        </div>
+ """, + height=370, + scrolling=True + ) + + + # Input del usuario + user_input = st.chat_input( + t['morpho_input_label'], + key=generate_unique_key('morphosyntax', "chat_input") + ) + + if user_input: + # Añadir el mensaje del usuario al historial + st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input}) + + # Mostrar indicador de carga + with st.spinner(t.get('processing', 'Processing...')): + try: + # Procesar el input del usuario + response, visualizations, result = process_morphosyntactic_input(user_input, lang_code, nlp_models, t) + + # Añadir la respuesta al historial + message = { + "role": "assistant", + "content": response + } + if visualizations: + message["visualizations"] = visualizations + st.session_state.morphosyntax_chat_history.append(message) + + # Mostrar la respuesta más reciente + with st.chat_message("assistant"): + st.write(response) + if visualizations: + for i, viz in enumerate(visualizations): + st.components.v1.html( + f""" +
+        <div>
+            {viz}
+        </div>
+ """, + height=350, + scrolling=True + ) + + # Si es un análisis, guardarlo en la base de datos + if user_input.startswith('/analisis_morfosintactico') and result: + store_morphosyntax_result( + st.session_state.username, + user_input.split('[', 1)[1].rsplit(']', 1)[0], # texto analizado + result.get('repeated_words', {}), + visualizations, + result.get('pos_analysis', []), + result.get('morphological_analysis', []), + result.get('sentence_structure', []) + ) + + except Exception as e: + st.error(f"{t['error_processing']}: {str(e)}") + + # Si es un análisis, guardarlo en la base de datos + if user_input.startswith('/analisis_morfosintactico') and result: + store_morphosyntax_result( + st.session_state.username, + user_input.split('[', 1)[1].rsplit(']', 1)[0], # texto analizado + result['repeated_words'], + visualizations, # Ahora pasamos todas las visualizaciones + result['pos_analysis'], + result['morphological_analysis'], + result['sentence_structure'] + ) + + # Forzar la actualización de la interfaz + st.rerun() + + # Botón para limpiar el historial del chat + if st.button(t['clear_chat'], key=generate_unique_key('morphosyntax', 'clear_chat')): + st.session_state.morphosyntax_chat_history = [] + st.rerun() + + + +''' +############ MODULO PARA DEPURACIÓN Y PRUEBAS ##################################################### +def display_morphosyntax_interface(lang_code, nlp_models, t): + st.subheader(t['morpho_title']) + + text_input = st.text_area( + t['warning_message'], + height=150, + key=generate_unique_key("morphosyntax", "text_area") + ) + + if st.button( + t['results_title'], + key=generate_unique_key("morphosyntax", "analyze_button") + ): + if text_input: + # Aquí iría tu lógica de análisis morfosintáctico + # Por ahora, solo mostraremos un mensaje de placeholder + st.info(t['analysis_placeholder']) + else: + st.warning(t['no_text_warning']) +### +################################################# +''' diff --git a/modules/morphosyntax/morphosyntax_interface.py b/modules/morphosyntax/morphosyntax_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..6584642d36e300db456639bf5f83d1e14a02ed12 --- /dev/null +++ b/modules/morphosyntax/morphosyntax_interface.py @@ -0,0 +1,184 @@ +#modules/morphosyntax/morphosyntax_interface.py +import streamlit as st +from streamlit_float import * +from streamlit_antd_components import * +from streamlit.components.v1 import html +import base64 +from .morphosyntax_process import process_morphosyntactic_input +from ..chatbot.chatbot import initialize_chatbot +from ..utils.widget_utils import generate_unique_key +from ..database.morphosintax_mongo_db import store_student_morphosyntax_result +from ..database.chat_db import store_chat_history +from ..database.morphosintaxis_export import export_user_interactions + +import logging +logger = logging.getLogger(__name__) + +def display_morphosyntax_interface(lang_code, nlp_models, t): + st.title("Análisis Morfosintáctico") + + # Contenedor para el historial del chat + chat_container = st.container() + + # Input del usuario (siempre visible en la parte inferior) + user_input = st.chat_input(t['morpho_input_label']) + + # Procesar el input del usuario + if user_input: + # Añadir el mensaje del usuario al historial + st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input}) + store_chat_history(st.session_state.username, [{"role": "user", "content": user_input}], "morphosyntax") + + response, visualizations, result = process_morphosyntactic_input(user_input, 
lang_code, nlp_models, t) + + # Añadir la respuesta al historial + assistant_message = { + "role": "assistant", + "content": response, + "visualizations": visualizations if visualizations else [] + } + st.session_state.morphosyntax_chat_history.append(assistant_message) + store_chat_history(st.session_state.username, [assistant_message], "morphosyntax") + + # Si es un análisis, guardarlo en la base de datos + if user_input.startswith('/analisis_morfosintactico') and result: + store_student_morphosyntax_result( + st.session_state.username, + user_input.split('[', 1)[1].rsplit(']', 1)[0], # texto analizado + visualizations + ) + + # Mostrar el historial del chat + with chat_container: + if 'morphosyntax_chat_history' not in st.session_state: + st.session_state.morphosyntax_chat_history = [] + for message in st.session_state.morphosyntax_chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualizations" in message and message["visualizations"]: + for i, viz in enumerate(message["visualizations"]): + st.markdown(f"**Oración {i+1} del párrafo analizado**") + st.components.v1.html( + f""" +
+        <div>
+            {viz}
+        </div>
+ """, + height=370, + scrolling=True + ) + if i < len(message["visualizations"]) - 1: + st.markdown("---") # Separador entre diagramas + + # Botón para limpiar el historial del chat + if st.button(t['clear_chat'], key=generate_unique_key('morphosyntax', 'clear_chat')): + st.session_state.morphosyntax_chat_history = [] + st.rerun() + + # Botón de exportación + if st.button("Exportar Interacciones"): + pdf_buffer = export_user_interactions(st.session_state.username, 'morphosyntax') + st.download_button( + label="Descargar PDF", + data=pdf_buffer, + file_name="interacciones_morfosintaxis.pdf", + mime="application/pdf" + ) + +''' + if user_input: + # Añadir el mensaje del usuario al historial + st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input}) + + # Procesar el input del usuario nuevo al 26-9-2024 + response, visualizations, result = process_morphosyntactic_input(user_input, lang_code, nlp_models, t) + + # Mostrar indicador de carga + with st.spinner(t.get('processing', 'Processing...')): + try: + # Procesar el input del usuario + response, visualizations, result = process_morphosyntactic_input(user_input, lang_code, nlp_models, t) + + # Añadir la respuesta al historial + message = { + "role": "assistant", + "content": response + } + if visualizations: + message["visualizations"] = visualizations + st.session_state.morphosyntax_chat_history.append(message) + + # Mostrar la respuesta más reciente + with st.chat_message("assistant"): + st.write(response) + if visualizations: + for i, viz in enumerate(visualizations): + st.markdown(f"**Oración {i+1} del párrafo analizado**") + st.components.v1.html( + f""" +
+        <div>
+            {viz}
+        </div>
+ """, + height=350, + scrolling=True + ) + if i < len(visualizations) - 1: + st.markdown("---") # Separador entre diagramas + + # Si es un análisis, guardarlo en la base de datos + if user_input.startswith('/analisis_morfosintactico') and result: + store_morphosyntax_result( + st.session_state.username, + user_input.split('[', 1)[1].rsplit(']', 1)[0], # texto analizado + result.get('repeated_words', {}), + visualizations, + result.get('pos_analysis', []), + result.get('morphological_analysis', []), + result.get('sentence_structure', []) + ) + + + except Exception as e: + st.error(f"{t['error_processing']}: {str(e)}") + + + + # Forzar la actualización de la interfaz + st.rerun() + + # Botón para limpiar el historial del chat + if st.button(t['clear_chat'], key=generate_unique_key('morphosyntax', 'clear_chat')): + st.session_state.morphosyntax_chat_history = [] + st.rerun() +''' + + +''' +############ MODULO PARA DEPURACIÓN Y PRUEBAS ##################################################### +def display_morphosyntax_interface(lang_code, nlp_models, t): + st.subheader(t['morpho_title']) + + text_input = st.text_area( + t['warning_message'], + height=150, + key=generate_unique_key("morphosyntax", "text_area") + ) + + if st.button( + t['results_title'], + key=generate_unique_key("morphosyntax", "analyze_button") + ): + if text_input: + # Aquí iría tu lógica de análisis morfosintáctico + # Por ahora, solo mostraremos un mensaje de placeholder + st.info(t['analysis_placeholder']) + else: + st.warning(t['no_text_warning']) +### +################################################# +''' diff --git a/modules/morphosyntax/morphosyntax_process-Back1910-25-9-24.py b/modules/morphosyntax/morphosyntax_process-Back1910-25-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..191855b00ca8237f7dfd88afc925a314e82a2dc4 --- /dev/null +++ b/modules/morphosyntax/morphosyntax_process-Back1910-25-9-24.py @@ -0,0 +1,29 @@ +#modules/morphosyntax/morphosyntax_process.py +from ..text_analysis.morpho_analysis import perform_advanced_morphosyntactic_analysis +from ..database.database_oldFromV2 import store_morphosyntax_result +import streamlit as st + +def process_morphosyntactic_input(user_input, lang_code, nlp_models, t): + if user_input.startswith('/analisis_morfosintactico'): + # Extraer el texto entre corchetes + text_to_analyze = user_input.split('[', 1)[1].rsplit(']', 1)[0] + + # Realizar el análisis morfosintáctico + result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code]) + + if result is None: + response = t.get('morphosyntactic_analysis_error', 'Error in morphosyntactic analysis') + return response, None, None + + # Preparar la respuesta + response = t.get('morphosyntactic_analysis_completed', 'Morphosyntactic analysis completed') + + # Obtener todos los diagramas de arco + visualizations = result['arc_diagram'] + + return response, visualizations, result + else: + # Para otros tipos de input, simplemente devolver la respuesta del chatbot + chatbot = st.session_state.morphosyntax_chatbot + response = chatbot.generate_response(user_input, lang_code) + return response, None, None diff --git a/modules/morphosyntax/morphosyntax_process.py b/modules/morphosyntax/morphosyntax_process.py new file mode 100644 index 0000000000000000000000000000000000000000..bfdc1fde99f3c1d52a19832917b980ed85aee0cc --- /dev/null +++ b/modules/morphosyntax/morphosyntax_process.py @@ -0,0 +1,29 @@ +#modules/morphosyntax/morphosyntax_process.py +import streamlit as st +from 
..text_analysis.morpho_analysis import perform_advanced_morphosyntactic_analysis
+from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
+from ..chatbot.chatbot import process_chat_input
+
+def process_morphosyntactic_input(user_input, lang_code, nlp_models, t):
+    if user_input.startswith('/analisis_morfosintactico'):
+        text_to_analyze = user_input.split('[', 1)[1].rsplit(']', 1)[0]
+        result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code])
+
+        if result is None or 'arc_diagrams' not in result:
+            response = t.get('morphosyntactic_analysis_error', 'Error in morphosyntactic analysis')
+            return response, None, None
+
+        response = t.get('morphosyntactic_analysis_completed', 'Morphosyntactic analysis completed')
+        visualizations = result['arc_diagrams']
+
+        store_student_morphosyntax_result(
+            st.session_state.username,
+            text_to_analyze,
+            visualizations
+        )
+
+        return response, visualizations, result
+    else:
+        chatbot = st.session_state.morphosyntax_chatbot
+        response = chatbot.generate_response(user_input, lang_code)
+        return response, None, None
diff --git a/modules/morphosyntax/txt.txt b/modules/morphosyntax/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/semantic/__init_.py b/modules/semantic/__init_.py new file mode 100644 index 0000000000000000000000000000000000000000..cc04bf76fe89672037c58d29484d6533457b2b07 --- /dev/null +++ b/modules/semantic/__init_.py @@ -0,0 +1 @@ +from .flexible_analysis_handler import FlexibleAnalysisHandler \ No newline at end of file diff --git a/modules/semantic/__pycache__/flexible_analysis_handler.cpython-311.pyc b/modules/semantic/__pycache__/flexible_analysis_handler.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee65f50c83dd3bfd0530139fa905960ce57b8a24 Binary files /dev/null and b/modules/semantic/__pycache__/flexible_analysis_handler.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_float.cpython-311.pyc b/modules/semantic/__pycache__/semantic_float.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afb9426ea5597b3d85ac0f0500d665a49b1a147b Binary files /dev/null and b/modules/semantic/__pycache__/semantic_float.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_float68ok.cpython-311.pyc b/modules/semantic/__pycache__/semantic_float68ok.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2736c7be7b369af9c9368810c98d590ef66db70e Binary files /dev/null and b/modules/semantic/__pycache__/semantic_float68ok.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_float86ok.cpython-311.pyc b/modules/semantic/__pycache__/semantic_float86ok.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0640e8983813bc16ee13251e6d6d65ffa23c823a Binary files /dev/null and b/modules/semantic/__pycache__/semantic_float86ok.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_float_reset.cpython-311.pyc b/modules/semantic/__pycache__/semantic_float_reset.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..021862ba760cca86a770a4d0c46349894e389ae6 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_float_reset.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface.cpython-311.pyc 
b/modules/semantic/__pycache__/semantic_interface.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a9a3c5527bfc1667cf12cb816298b9b8dc4314 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interfaceBackUp_2092024_1800.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interfaceBackUp_2092024_1800.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b49443b6b818aef2248867278c43369ca51a662 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interfaceBackUp_2092024_1800.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interfaceBorrados.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interfaceBorrados.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef8bf460009df8fd59e3d489ebdce3163f2588f6 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interfaceBorrados.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interfaceKoKo.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interfaceKoKo.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c42b43be508ed128eac77ae0f753dc575a9113e Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interfaceKoKo.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_1.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_1.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9f0fef968b2f37b43dfc116c5f2ce2eb3d9e099 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_1.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_2.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46222222f83623c6b7de8bb0a92c634af86f8744 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_2.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_2192024_1632.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_2192024_1632.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d62dabaaf573d5de0a760a242a43332955c9f36 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_2192024_1632.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_3.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_3.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b5eec9852b9159bbf76c1f7d8482108c099803 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_3.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_4.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_4.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..701cbc0e83a28b4a25fd8aa80465fd5e79e7b01a Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_4.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_5.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_5.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52ee832d6ece08e1f92d23f223e56796b1a2cc54 Binary files /dev/null and 
b/modules/semantic/__pycache__/semantic_interface_5.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_6.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_6.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcbe12108da351a568e772abfbb883d25b08c517 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_6.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_61.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_61.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..386c8ac736aeeb2a120880d3e6bfae0facc9f7f2 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_61.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_610.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_610.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f60c48f197a142f57c9f59cb1055ae57db32407f Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_610.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_62.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_62.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fedd207e2531dc00d3ee98645be42383e194e8db Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_62.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_63.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_63.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78b98d5ffad8ec43b02cdcafab3a6752e4a27ef7 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_63.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_64.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_64.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e94c4a23f3a5c2098964ce3f8d5d67d561e0a944 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_64.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_65.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_65.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e45c7073838ffe64539d9470f4efecc2dad75b0 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_65.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_66.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_66.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd5c0b21aba6451de1963b18e79e2e8121f69f0 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_66.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_67.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_67.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fa300c5559d94da29f1c2dc3a5c0712e028ea6f Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_67.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_68.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_68.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5bc6395eabbac5e54d20a38a4eb1aaf61e4e171e Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_68.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_681.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_681.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d13664fec98f1609721ede6a5ab8abf88480f375 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_681.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_68ok.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_68ok.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85965738ee096cba107008fa62c5d60004d3fad7 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_68ok.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_69.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_69.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66c5867ad061bca00a6ec984d824ea694fe26adc Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_69.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_7.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_7.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef2c5bc6d30879a47186e7e17858e0b3a001bb04 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_7.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_StreamLitChat.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_StreamLitChat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d43c979fafff38614b9565936d755342894b7abf Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_StreamLitChat.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_Test.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_Test.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5c14d8bf89c9a1e8f5887e40f3c307ea0f43fbf Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_Test.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_afterParty.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_afterParty.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cce4f330ccd69352a6f6ec411dfa26d5fd01789c Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_afterParty.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_backup2092024_1930.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_backup2092024_1930.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7940c44b2027728a9e8478d2c5a508ed44070469 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_backup2092024_1930.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_backup_2092024.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_backup_2092024.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db97b9995c894928a31892cda744d6c4da483f9 Binary files /dev/null and 
b/modules/semantic/__pycache__/semantic_interface_backup_2092024.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_backup_2192024_1230.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_backup_2192024_1230.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b09631b0b72880568c8077ba29bb75283d88c1 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_backup_2192024_1230.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_interface_vOk.cpython-311.pyc b/modules/semantic/__pycache__/semantic_interface_vOk.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9f2fa094a416e6429b9a3bccb0312558c955c02 Binary files /dev/null and b/modules/semantic/__pycache__/semantic_interface_vOk.cpython-311.pyc differ diff --git a/modules/semantic/__pycache__/semantic_process.cpython-311.pyc b/modules/semantic/__pycache__/semantic_process.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58e1056dd70a702aa4ea3f67e4ff20e54923fd9c Binary files /dev/null and b/modules/semantic/__pycache__/semantic_process.cpython-311.pyc differ diff --git a/modules/semantic/flexible_analysis_handler.py b/modules/semantic/flexible_analysis_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..3e4396ab87a2a3559c06d41b388e7cdc4b843048 --- /dev/null +++ b/modules/semantic/flexible_analysis_handler.py @@ -0,0 +1,59 @@ +from typing import Dict, Any +import base64 +from io import BytesIO +from matplotlib.figure import Figure + +class FlexibleAnalysisHandler: + def __init__(self, analysis_data): + self.data = analysis_data + + def get_key_concepts(self): + return self.data.get('key_concepts', []) + + def get_concept_graph(self): + return self.data.get('concept_graph') + + def get_entity_graph(self): + return self.data.get('entity_graph') + + # Método genérico para obtener cualquier tipo de grafo + def get_graph(self, graph_type): + return self.data.get(graph_type) + + # Agrega más métodos según sea necesario + + +''' +class FlexibleAnalysisHandler: + def __init__(self, analysis_data: Dict[str, Any]): + self.data = analysis_data + + def get_key_concepts(self): + if 'key_concepts' in self.data: + return self.data['key_concepts'] + elif 'word_count' in self.data: + # Convertir word_count a un formato similar a key_concepts + return [(word, count) for word, count in self.data['word_count'].items()] + return [] + + def get_graph(self): + if 'graph' in self.data: + # Decodificar la imagen base64 + image_data = base64.b64decode(self.data['graph']) + return BytesIO(image_data) + elif 'arc_diagrams' in self.data: + # Devolver el primer diagrama de arco como SVG + return self.data['arc_diagrams'][0] + return None + + def get_pos_analysis(self): + return self.data.get('pos_analysis', []) + + def get_morphological_analysis(self): + return self.data.get('morphological_analysis', []) + + def get_sentence_structure(self): + return self.data.get('sentence_structure', []) + + # Agregar más métodos según sea necesario para otros tipos de análisis +''' \ No newline at end of file diff --git a/modules/semantic/logV6ite5.txt b/modules/semantic/logV6ite5.txt new file mode 100644 index 0000000000000000000000000000000000000000..27ff0010ade774a6e958f97c6f380c6b08050b14 --- /dev/null +++ b/modules/semantic/logV6ite5.txt @@ -0,0 +1,63 @@ +Request headers: + 'Cache-Control': 'no-cache' + 'x-ms-version': 'REDACTED' + 
'x-ms-documentdb-query-iscontinuationexpected': 'REDACTED' + 'x-ms-consistency-level': 'REDACTED' + 'x-ms-documentdb-isquery': 'REDACTED' + 'Content-Type': 'application/query+json' + 'x-ms-session-token': 'REDACTED' + 'x-ms-documentdb-query-enablecrosspartition': 'REDACTED' + 'x-ms-date': 'REDACTED' + 'authorization': 'REDACTED' + 'Accept': 'application/json' + 'x-ms-cosmos-correlated-activityid': 'REDACTED' + 'Content-Length': '154' + 'User-Agent': 'azsdk-python-cosmos/4.7.0 Python/3.11.5 (Windows-10-10.0.22631-SP0)' +A body is sent with the request +INFO:azure.core.pipeline.policies.http_logging_policy:Response status: 200 +Response headers: + 'Content-Length': '377' + 'Date': 'Mon, 23 Sep 2024 16:50:28 GMT' + 'Content-Type': 'application/json' + 'Server': 'Compute' + 'x-ms-gatewayversion': 'REDACTED' + 'x-ms-activity-id': 'REDACTED' + 'x-ms-last-state-change-utc': 'REDACTED' + 'x-ms-resource-quota': 'REDACTED' + 'x-ms-resource-usage': 'REDACTED' + 'x-ms-schemaversion': 'REDACTED' + 'lsn': 'REDACTED' + 'x-ms-item-count': 'REDACTED' + 'x-ms-request-charge': 'REDACTED' + 'x-ms-alt-content-path': 'REDACTED' + 'x-ms-content-path': 'REDACTED' + 'x-ms-documentdb-partitionkeyrangeid': 'REDACTED' + 'x-ms-xp-role': 'REDACTED' + 'x-ms-cosmos-query-execution-info': 'REDACTED' + 'x-ms-global-Committed-lsn': 'REDACTED' + 'x-ms-number-of-read-regions': 'REDACTED' + 'x-ms-transport-request-id': 'REDACTED' + 'x-ms-cosmos-llsn': 'REDACTED' + 'x-ms-session-token': 'REDACTED' + 'x-ms-request-duration-ms': 'REDACTED' + 'x-ms-serviceversion': 'REDACTED' + 'x-ms-cosmos-is-partition-key-delete-pending': 'REDACTED' + 'x-ms-cosmos-physical-partition-id': 'REDACTED' +2024-09-23 10:50:28.499 `label` got an empty value. This is discouraged for accessibility reasons and may be disallowed in the future by raising an exception. Please provide a non-empty label and hide it with label_visibility if needed. 
+ERROR:modules.database.database:Error retrieving analysis details for user sebastian.marroquin@aideatext.ai: Error=2, Details='Response status code does not indicate success: BadRequest (400); Substatus: 0; ActivityId: 1232e510-97a7-434e-a4c1-fca9fcdb4820; Reason: (Message: {"Errors":["The index path corresponding to the specified order-by item is excluded."]}
+ActivityId: 1232e510-97a7-434e-a4c1-fca9fcdb4820, Request URI: /apps/8198d87f-2a8c-48ce-b2aa-d600d8339179/services/8cde3c70-163e-4ffe-9ef7-2e635e3612a9/partitions/617356c9-0748-483a-9063-d83f8fa10f24/replicas/133658946384147008s/, RequestStats: Microsoft.Azure.Cosmos.Tracing.TraceData.ClientSideRequestStatisticsTraceDatum, SDK: Windows/10.0.20348 cosmos-netstandard-sdk/3.18.0)', full error: {'ok': 0.0, 'errmsg': '<same BadRequest message repeated verbatim>', 'code': 2, 'codeName': 'BadValue'}
[the identical error is logged twice more, with ActivityIds 42ca6540-5e85-417e-ac10-84c49e87515c and 20b3da1e-1bb8-4857-8d0d-80fd19d49e32]
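The repeated BadRequest is Cosmos DB refusing an ORDER BY on a path that the container indexing policy excludes, and the {'ok': 0.0, 'codeName': 'BadValue'} wrapper suggests the query went through the MongoDB API layer rather than the SQL one. A minimal sketch of a fix under that assumption; the environment variable, database, collection, and field names are guesses, not taken from this diff:

    import os
    from pymongo import MongoClient, DESCENDING

    client = MongoClient(os.environ["MONGODB_CONNECTION_STRING"])  # assumed env var
    collection = client["aideatext"]["student_morphosyntax"]       # hypothetical names

    # Cosmos DB's MongoDB API only honors sorts on indexed paths; creating an
    # index on the sort field removes the "order-by item is excluded" error.
    collection.create_index([("timestamp", DESCENDING)])

    # The failing query pattern should then succeed:
    docs = collection.find({"username": "sebastian.marroquin@aideatext.ai"}).sort(
        "timestamp", DESCENDING
    )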
+ERROR:modules.ui.ui:Error in tab 3: 'timestamp'
+Traceback (most recent call last):
+  File "M:\test-3dev\modules\ui\ui.py", line 177, in user_page
+    func(st.session_state.username, st.session_state.lang_code, t)
+  File "M:\test-3dev\modules\studentact\student_activities.py", line 88, in display_student_progress
+    st.subheader(f"Conversación {i+1} - {chat['timestamp']}")
+                                         ~~~~^^^^^^^^^^^^^
+KeyError: 'timestamp'
+INFO:root:display_feedback_form called with lang_code: es
+INFO:modules.ui.ui:Finished rendering user_page
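The KeyError shows that at least one stored chat document has no 'timestamp' field. A defensive rewrite of the failing line in display_student_progress, sketched under that assumption (the sample chats list is illustrative):

    import streamlit as st

    # The second document mimics a legacy record saved without a timestamp.
    chats = [
        {"timestamp": "2024-09-23 10:50", "messages": []},
        {"messages": []},
    ]

    for i, chat in enumerate(chats):
        # dict.get avoids the KeyError raised by chat['timestamp'] above
        st.subheader(f"Conversación {i+1} - {chat.get('timestamp', 'sin fecha')}")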
+INFO:modules.ui.ui:Final session state: {'semantic_file_selector_sebastian.marroquin@aideatext.ai': 'Uso de stanza en el análisis sintác.txt', 'graph_visible': True, 'logout_button_sebastian.marroquin@aideatext.ai_es': False, 'discourse_chatbot': <chatbot object>, 'language_selector_sebastian.marroquin@aideatext.ai_es': 'Español', 'semantic_chatbot': <chatbot object>, 'semantic_chat_history': [{'role': 'user', 'content': 'Hola'}, {'role': 'assistant', 'content': 'Hello, thanks for getting in touch. \n\nTo help with the syntax-level functional requirement, I will first lay out a standard teaching sequence for how to improve the writing skills of a student starting from syntactic analysis:\n\nStep 1: Read the text the student wrote and flag syntax problems such as poorly built sentences, gender and number agreement errors, incorrect verb tenses, etc. \n\nStep 2: Explain the basics of syntax and sentence structure to the student, e.g. subject, verb, complements, simple versus compound sentences.\n\nStep 3: Show the student example sentences with syntactic problems and have them identify and correct the issues, guiding this self-correction process.\n\nStep 4: Ask the student to apply what was learned to their own text, spotting and fixing syntax problems. Different colors can be used to highlight dif'}], 'delete_audio prompt.txt.txt': False, 'db_initialized': {'mongodb': True, 'cosmos_sql': True}, 'file_contents': None, 'semantic_chat_input_sebastian.marroquin@aideatext.ai': 'Hola', 'lang_code': 'es', 'morphosyntax_chatbot': <chatbot object>, 'semantic_send_message_sebastian.marroquin@aideatext.ai': False, 'delete_Semblanza.txt': False, 'morphosyntax_chat_input_sebastian.marroquin@aideatext.ai': None, 'nlp_models': {'es': <spaCy pipeline>, 'en': <spaCy pipeline>, 'fr': <spaCy pipeline>}, 'feedback_name_es': '', 'role': 'role', 'discourse_sebastian.marroquin@aideatext.ai_sebastian.marroquin@aideatext.ai': None, 'username': 'sebastian.marroquin@aideatext.ai', 'current_file_contents': 'Using Stanza for syntactic analysis in the teaching of writing. \r\n\r\nStanza is a natural language processing (NLP) library developed by the Stanford NLP Group that provides linguistic analysis tools for many languages. Its capabilities range from text segmentation to more complex analyses such as part-of-speech tagging, named-entity recognition, and syntactic and semantic analysis, among others. \r\n\r\nHere is how some of its specific features can support writing-instruction activities at the upper-secondary and higher-education levels, from an andragogical approach:\r\n\r\nSegmentation of text into sentences and words.\r\nThis feature can help students identify the basic structure of texts. \r\nBy breaking a text down into its most basic components, students begin to understand how sentences and paragraphs are built, which is fundamental to writing.\r\n\r\nPart-of-speech (POS) tagging: Understanding the parts of speech is essential for syntactic analysis and for building coherent, complex sentences. Stanza can help students automatically identify nouns, verbs, adjectives, etc. in the texts they write or analyze, making grammar and syntax learning applied rather than abstract.\r\nNamed-entity recognition (NER): This tool can be useful for writing activities that involve research and text analysis. \r\n\r\nBy identifying people, places, organizations, and other entity types within a text, students learn to distinguish between different kinds of information and to use them appropriately in their writing.\r\n\r\nSyntactic analysis: Analyzing sentence structure can significantly improve writing quality. Stanza makes it possible to analyze how the words in a sentence relate to one another, which helps students understand and apply notions of coherence and cohesion in their texts.\r\n\r\nDependency parsing: This feature gives a detailed view of the syntactic relations inside sentences, which is crucial for building complex, well-formed sentences. Students can use it to review and improve the syntactic structure of their writing.\r\nLemmatization and stemming: These help students understand word roots and their variants, which is useful for growing vocabulary and for using words correctly in different contexts.\r\nFrom an andragogical standpoint, tools such as Stanza encourage a more self-directed, reflective approach to learning to write. Students can use them to analyze and improve their own texts, receive immediate feedback on specific aspects of their writing, and carry out linguistic inquiries that deepen their command of the language. Bringing digital technologies into learning aligns with the needs and learning styles of adult learners, promoting autonomy, self-assessment, and practical application of what has been learned.\r\n\r\n \r\nAnnex I. Syntax-level functional requirement [Product 1]\r\nThis section describes the tasks to be performed by the professional identified as the lead user: a professional competent in the teaching and learning of Spanish who speaks it as a mother tongue. We require their professional services to: \r\n[Sub-product 11] Produce a standard [teaching] sequence showing how they would teach a student to improve, starting from a syntactic analysis. We do not need a description of how to perform syntactic analysis, but of how they teach the student to write using their own techniques and methods at the syntactic level. \r\nExample:\r\n\r\nStep 5: Evaluate. \r\nWhen the student finishes drafting a text I have to correct it. I take a red pen, start reading, and mark repeated words, and also places where gender and number do not agree; and so on [here the description must be detailed].\r\nStep 6: Feedback on the evaluation\r\nAt this point I aim my comments at the strengths of the student, then show how the writing can improve, presenting examples of other texts close to their style [here the description must be detailed].\r\n[Sub-product 12] Using the results of product [11], report which tasks could be replaced by functions in the semantic-analysis feature of AIdeaText. It is important to sketch, using the AIdeaText interface, how this feature would look, noting which visualizations would work better than others (or whether none works), and whether other features would be needed that would be very laborious to produce by hand. \r\nExample: \r\nEvaluate function: By presenting a visualization, the application already delivers an evaluation. But for the syntactic case, would it not be better to return the same text, circling where the repeated words occur, for example? [A sketch should show how this function would look in the interface]\r\n', 'morphosyntax_clear_chat_sebastian.marroquin@aideatext.ai': False, 'concept_graph': 'data:image/png;base64,<several kilobytes of base64-encoded matplotlib PNG omitted>', ...}
nzUCgUsLe3R1pa2ozXgdfV1cHX19dg3vu5hElgzTPmsrjSNrotKirSOZlr86QTEhJmpAHtVBjabLinp0ePo5ka/v7+iIqKQlFREbXt+vXrCA8PH/f7rlAo8N1331F9PrSYmZlh7969U+rzNNtwOBxs3boVfn5+OH/+PEpLS9HS0oLdu3fTLpza6NXixYun/ffo5uaGQ4cOjWjrHhUVhXXr1lECa7Luh4YKk8kEn8/H1atXAWjMLiYqsCwtLbFy5UpkZWUhKysLAQEBc8p6e7p6Omm3m3o6zR9IkkRPTw8t3W9oxgiDwYCbmxvNkGI265zHy8DAAFVbZW5ujvj4eOzYsWPGSwcUCgUyMjLw6NEjAEBERMS0ORCORW1t7YzUdZkwCax5xVwUV9qVppycHJ0JOaBZJUtMTERsbKzBWuoOFVi9vb36G8w0kJKSgsrKSp3eWM8999yYky6JRIJvvvkGTU1NtO3W1tbYv3+/0drILly4EB4eHkhPT6eETXJyMpKSktDc3Izq6uppjV4NZTRb96KiIhQXF6O+vh6+vr4GtwAxHcTGxuL69esgCIIqoHd1dZ3QMRISEiAQCNDZ2YkHDx4gKSlphkY7vUxHTyftdlNPJxNqtRptbW00QTWcXbqnpyfNLn02xMJkIAiCilbV1dUhIiICe/bsgYeHx6w8v1AoRHp6Ojo6OsBms7Fx40bExcXN2gJFXV0dli5dOivPNd8wCax5wlwTVyqViqqv0q68D8bb2xtJSUkIDQ01+Pz9wZ8Dl8s12hRBLZaWlkhOTsb58+epbU+ePEF5efmoNtl9fX34+uuvdT5PR0dH7N+/3+gn/g4ODnjxxReRnZ2N3NxcXL58GXV1dZQQTUhIoNJFZwqtrXtsbCwuX76M0tJSAEBvby9qamogFovx5MmTcdm6GxNWVlYICwtDSUkJAI3ZxUQdLlksFlJSUnD8+HHcuHEDUVFRei+6H62nk/bv0Xo6DRZUhroAZUK/yOVyml16U1OTjl26paUlzd1vtu3SJ0N/fz8VrbK0tASfz8fOnTtn7XdAkiQEAgEyMzOhUqng7OyMtLS0CS/8TIXe3l6oVKo5l7VgKJgE1jhRKBRob29He3s7enp6oFKpqHz+2NhYsNlsmJmZwdnZGS4uLrC3tzeYif1cEldisZiqrxq6asZgMKj6KmNKIxsawdKmcBkzcXFxKCwsRENDA7Xt0qVL8Pf3H/YC1tnZia+++kpHXLq7u2Pfvn16nchOJ2w2Gxs2bICvry9++OEHPHz4ECUlJYiKiprViIidnR327NmD2tpaZGRkoLW1lbrv22+/RUBAADZs2DCn6rHi4+MpgVVYWIjk5OQJ1wAFBQUhKCgIlZWVuHbtGrZu3ToTQx21p5P2b1NPJxMzQX9/Py06NZxduqOjI01QOTg4GMV3bHC0qr6+HhEREXj22Wfh7u4+q+OQyWQ4f/48dT6KjY3Fxo0bZ70m0VR/NbOYBNYIiMViFBUVob6+HkKhECKRCE5OTnBxcYGDgwPMzc1hY2MDBoMBBwcHqNVqSKVSPHr0CO3t7RCLxXB2doabmxvCwsIQEBCgF8HV19eHY8eOGb24am9vp+qrtKlNWng8HuLi4pCQkAB7e3s9jXDyzEWBNbg3FkEQADT57deuXcPGjRtp+zY3N+P48eM6hiR+fn549tln5+TKemhoKNzd3fHLX/4Scrkc7e3tyMvLw/Lly2f1POHn54dXX30Vf/3rX1FdXU1F0Kqrq/HRRx+NautubPj6+sLJyQmdnZ2Qy+UoLi4Gn8+f8HFSUlJQXV2N/Px8LFq0aEz7/cGYejqZMCRIktSxSx+aos5kMmnpfvq2S58Mg6NVVlZW4PP52LVrl15MVpqbm5Geno6enh5wuVxs3boVkZGRsz4OwFR/NdOYzs6DIAgCVVVVePToEWpraxESEoLIyEgkJyfDwcFhQhMfmUyGjo4OtLS04ObNmzh37hyio6MRGxsLR0fHGXwVP6IVV93d3UYprrR9J3JyclBVVaVzv62tLVVfZcwTwKECa2BgQI+jmT5cXFywdOlS3L59m9r24MEDREVFUQYP1dXV+O6776BQKGiPDQsLQ2pq6pyeQIpEIri7u4MgCLi7u+P69euoq6vDrl27YG1tPWvjYDKZcHBwwOLFi+Hl5YX29vZx2bobGwwGA/Hx8cjMzASgMbuYTK2Dk5MTFi9ejJycHGRkZODQoUMAYOrpZMLgUalUaG5upjX0HWqXzuPxaO5+np6eBmGXPlG08zmBQICGhgYsXLhQL9EqLSRJIicnB9nZ2dQ5Py0tbdbmg8ONp66uDsuXL9fL888H5u7sZQKQJImioiJkZ2fDzs4OsbGxU3aO0fZ08Pb2xuLFi9He3o6CggJ88cUXcHV1xaZNm2b0h2XM4kqlUqG4uBg5OTlob2/Xud/T0xNLlixBWFiYwaRhToW5VoM1mBUrVuDx48eUM6K2N9bLL7+MsrIynD59Wsfxkc/nY/PmzXPisx2Nmzdvgslk4tlnn4W/vz9Onz6N2tpafPzxx9i5cycCAwNnbSwdHR2UBb5SqRyXrbsxEh0djezsbKhUKrS2tqKlpWVMG+ThejopFApqMe7JkyewtLQcs6eTjY0NLC0t5/z32oThIJFIaHbpLS0tOudbGxsb+Pj4UNEpFxcXo/6O9vX1UdEqa2trxMfHIzU1Va8tAcRiMc6ePYvKykoAGrfYdevW6XUBsbe3FwRB6E3gzQcY5NDk2nlGT08PLly4AIlEgi1btsx4zwG1Wo28vDzcunULSUlJWLJkybQXgxqruBKLxXj48CEePHgwbH1VaGgolixZAi8vrzm1ukuSJH7/+9+DIAh0d3ejsbER6enpBl8kPF6qqqrw9ddf07Z5e3ujqalJJ7d/+fLlWLNmzZz6fIejpaUFn376KTgcDn72s5/B0tISIpEIp0+fRk1NDQBg2bJlWL169Yx/DxQKBd5++22wWCz88z//M5hMJkiSRElJCa5cuaIj+KOiopCcnAwbG5sZHddMcfbsWRQUFIAkSYSFhWHFihWj1jsplcphezo1NDQgNzcXLi4u+OlPf2p0aVMm5hZau/TBgmqoYRCDwYCLiwutfsoQ7dInCkEQqKyshEAgQGNjIxYuXAg+nz+h9N2Zoq6uDqdOncLAwADMzc2xY8cOhISE6HtYyM/PR21tLVJTU/U9lDnLvI1gkSSJ3Nxc3L59G0uXLkVSUtKsrNqwWCwkJiYiNDQUFy5cwOPHj7Fz585pOxEYo7jq6OhAbm4uCgsLdeqruFwuYmNjsXjxYjg4OOhphDMLg8GAmZkZJBIJOBwOFAoFpFIprKys9D20aSEwMBALFy7E48ePQZIk6uvrcfv2bSxatIiW2rlhw4YZsyk3NG7evAlA4xyonZhbWVlh3759uHPnDq5fv447d+6gvr4eaWlpMzoJ0vascXJyos6BWlv3kJAQ3LlzR8fWvby8HMuXL0dSUpLBpXGO1dOpoaEB9+7dg1KpxP3799HR0QFHR8cJ93RKTExET08PhEIhBAIBVqxYoYdXa
2K+QhCEjl26SCSi7cNms+Hl5WUUdumToa+vD/n5+Xj06BFsbGzA5/ORlpZmEA2sCYLArVu3cPPmTZAkiQULFiA1NdVgBK3W4MLEzGFYV8ZZgiRJZGVlob6+HocPH9bLxN3Ozg579+5FUVERvvrqKzz33HPw8vKa0jGNSVyRJIna2lrk5ORQYfPB2NjYYPHixeDz+XPqgjAS5ubmkEgk4HK5UCqVc0pgARrxVFlZicePH6O5uRmAJrK1cOFCMJlMbN++HdHR0Xoe5ezQ0tKCiooKcDgcLFmyhHYfk8nEihUr4OPjg1OnTqGxsREff/wxtm/fjtDQ0BkZj3aVezjHQA6HM6ytu0KhwNWrV5Gfn48NGzbMmq37WD2dtH+P1tNp6dKl4HK56O7uBpPJxKJFiyYl7JlMJjZu3IijR4/i9u3biImJMdqongnDR2uXro1QNTU16dSuWlhY0KJT7u7ucyYTQsvQaFVkZCT27t07q/bmYzEwMIBTp06hrq4ODAYDK1aswKpVqwwm9VJbf7Vy5Up9D2VOM+8EFkmSyMjIQHNzMw4cOKBXAcJgMBAdHQ0LCwt8++23eOaZZ7BgwYJJHctYxJVKpcLjx4+Rk5MDoVCoc7+HhweSkpIQHh4+5y4Mo6H9rDgcDpRKpY6jnrFjbm4OFotFiStAEznp7e3F66+/juDgYD2ObnbRRq8WLVo0YlqZj48PXn31VZw9exZPnjzBiRMnZixvXyuwRuuFMtTWXVsb2dPTMy227iRJQiwW6zS/nameTsuXL6f6tD18+BCLFy+elED09fVFeHg4SktLkZ2djV27dk3q9ZswMRRttFX7r62tbUS7dK0phaOj45xNr+7t7aVqq+zs7MDn87F7926DM+CorKzEmTNnIJFIYGVlhV27dsHf31/fw6KhrYmeq1lBhsK8ElgkSeLSpUtobW3F/v37DSYyEhQUhF27duHEiRN45pln4OPjM6HHG4O4kkgkVH3V0DQGBoOBkJAQJCUlYcGCBXP2AjEa2s+LyWSCyWSit7d3wt8DQ0WhUOD777+HWCyGjY0NZUOvNQWYK69zPLS2to4YvRqKhYUFnnvuOeTm5iI7Oxv3799HQ0MDdu/ePa0XxtEiWEPR2ro/fPgQ169fp5oka23dExISsGrVKtq5VZuuZ0g9nSIjI3H58mXI5XJ0dnaivr5+0uky69atw5MnT1BUVISEhIQpZyKYmH+QJInOzk6aoNJOgrUwmUx4eHjQ7NLnUpbDcBAEgSdPnkAgEKCpqQlRUVHYt2+fQUWrtKjValy9ehX37t0DAAQEBGDnzp0G+Rlp7dnn41xrNplXAquoqAgNDQ148cUXDa63TkBAAHbt2oVTp07htddeG7dAMnRx1dnZSdVXDe3+zuFwEBsbi8TExHm/kjLUSXDoxdVYkUqlOH78OJqamsBgMBAcHAyBQAAOh4OoqCiwWCxcv34dGzZs0PdQZwVt9Co+Pn5cF14Gg0EtPKSnp6O1tRWffPIJtmzZMm29UyYisADNRC8hIQFhYWHIyMhAbm4u5HI55HI5qqqq8O2338Lf3x8ODg4QiUQG2dOJy+UiKioKeXl5AIC8vLxJCyx7e3skJSXh9u3byMjIwOHDh00TFxOjolKp0NLSQompxsZGarFCi9YuXRud8vT0NIjaotmgt7eXqq2ys7NDfHw89uzZY3DRKi09PT1IT09Hc3MzmEwm1qxZg6VLlxrseaCurg5+fn76HsacZ94IrIGBAVy+fBn79u0zOHGlJTAwEKGhocjMzMTOnTvH3N9QxZU2vzcnJwdPnjzRud/a2pqqrzKE8RoCQ3thzQWB1d/fj6+++ormZGVlZYXw8HCq1w8A3L9/H9HR0XrrTzJbtLW1oby8HGw2G0uXLp3QYz09PXHkyBGcP38eJSUlOHXqFGpra7Fx48YpTTqUSiV6enrAZDIpu16SJCfU08nd3R3V1dWQSCTg8Xjg8Xjo6uqChYUFUlNTERQUZJATjfj4eEpglZWVQSQSTXq1efny5SgoKEBzczOKiormTT2hifEhlUp17NKHGjppDVa0/4zdLn2iqNVqKlrV3NyMqKgo7N+/Hy4uLvoe2qiUlJTg3LlzkMvlsLW1RVpamkG3stDOz9asWaPvocx55oXAIkkS58+fR3x8vMFP4pKTk/HRRx+hoqJiVCtPQxRXarWaqq9qa2vTud/d3R1JSUmIiIiYV/VVo0EQBHp6etDR0YG6ujpIpVK0trbi2LFjKCwsBKARX76+vrCysoKLiwv1T9+f92h0dnbiq6++0rH4dnd3x89+9jMcPXoUvb29AH78fR4+fHhOTygmGr0aipmZGdLS0uDn54fMzEzk5+ejqakJaWlp456EDO3pVFdXh8rKSvB4PBw7doy6j81m01LzxurpRJIkSktLcfnyZeozF4vF+OabbwzW1t3V1RULFixAQ0MDCILAo0ePJt10k8vlIjk5GWfOnEF2djbCwsLmTbTBBB2SJNHb20tL9xuvXbohLkTMND09PcjPz0dBQQHs7e3B5/PxzDPPGGy0SotSqURWVhYePnwIAAgLC8O2bdsM+roMAF1dXWAymbCzs9P3UOY880JgPXnyBL29vXjmmWf0PZQx4XK52L59O06fPo3AwMBhhYihiSupVErVVw0MDNDu06aFJSUlwcfHZ15eQAZDkiQaGhooN72Ojg4olUo0Nzejrq4OgOb9lMlk1GOkUinKysp0jmVjYwNXV1cEBAQgMjLSYPrwtLS04Ouvv9Yx6vD19cVzzz0HHo+HzZs34/jx47TH5OXlYfHixbM93FlBKBSirKxsUtGrwTAYDMTHx8Pb2xsnT55Ee3s7PvvsM2zYsAFBQUFjOuypVCqaaOrs7ASPx0NoaCjWrFkDGxsbWFlZTVgcMBgMREREIDg42Khs3ePj49HQ0AAAEAgEWLp06aRFflRUFB48eIDm5mbcvn0ba9eunc6hmjBQCIKAUCikCaqh10E2mw1PT0+aXbqhT8RnksHRqpaWFkRFReHAgQOTNsmZbTo6OpCeng6hUAgWi4WUlBQsWrTIKOY3Wnt2YxirsWM4V7oZ5P79+1i+fLnRRE18fX1hb2+PiooKhIeH0+4zJHHV1dWF3NxcFBQUDFtfFRMTg8TERFOncGjS5QoLC1FQUICuri6d+wdPOlksFuRy+biO2d/fj8rKSly+fBkhISGIjY1FYGCg3iJBNTU1OHHihI59cFhYGFJTU6nXGRQUhIiICJSUlFD7XL16FWFhYQYX6ZgOtNErPp8Pa2vrcT9O29NpsFDSiiU7OzuUlpaiqqoK2dnZ8Pb2RkJCAhwcHMbd0+natWsQCoXg8/nT0hNlPLbuKSkpCAkJMYgLfHh4ODIzMyGRSNDb24vq6moEBQVN6lgMBgMbN27E559/jpycHMTFxcHe3n6aR2xC3ygUCjQ1NVFiaiS7dG3tlNYu3ZAWFvSFNlr16NEjODo6Gk20SgtJkigoKMClS5egVCrh6OiI3bt3G0RD4/FSV1eHgIAAfQ9jXjDnf/FdXV0QCoUICwvT91AmRHx8
PB4+fEgTWP39/XoXV9pGsdr6qqG2sdbW1khISACfz4eFhcWsjs0Q6ejowNWrV1FRUaHzXg1m8AWGyWSOS2ANhiAIlJWVoaysDNbW1khKSsLixYtndVGhtLQUp06dglqtpm2Pi4vDli1bdETfhg0bUFVVRb1WhUKBjIwMo4g0TwShUIjS0lKw2WwsW7aM2q5SqWiiaaR6JyaTSYs6De7ptGrVKtTV1eHGjRtQqVQgCAKrV68edyr0RA0uxstotu4nTpyYsq37dMFmsxETE0M5f+Xl5U1aYAGAl5cXoqOjUVhYiMuXL8+57/J8ZGBggFY/1dbWBoIgaPs4ODjQ0v3msl36RFGr1aioqIBAIEBrayuio6Nx8OBBvf/2J4pcLseFCxdQXFwMAIiOjsbmzZuNKhVYW3+VnJys76HMC+a8wBIIBIiJiTG61aOwsDBkZmaiq6sLjo6O6O/vx9GjR/UmrtRqNUpKSpCTk4PW1lad+93c3JCUlISFCxcaTaRwJlGpVLhz5w5u376tIziGY2gEa+iK6ETQGroUFhZi27Zt8PT0nPSxxsvDhw9x8eJFHRG5bNkyrF27dtjJhrW1NZKTk3Hx4kVqW1lZ2Zj1h4bO0J5OZ86coWxxz507N+09nby8vBAaGoqTJ09CKBTi888/x/r165GQkDDmJK+zsxPA9AssLZOxdZ9t+Hw+JbAqKyvR29s7pfqEtWvXUosdtbW1JrcuI2KwXbpWVHV3d9P2GWqXvmDBAoO04tY33d3dVG2Vo6Mj4uPj8dxzzxndXAzQtNc4efIkuru7weVysXnzZqM0suns7ASbzTbVX80SxvdNnyAlJSXYt2+fvocxYdhsNiIjI1FaWoro6Gi9iSupVAqBQIAHDx5Q/YsGo62vMuX0/khjYyPOnTunU9g8GoMjWCwWa1oaDWsn24sXL8aaNWtmZKWNJEncvn0b165d07kvJSUFSUlJoz6ez+dT7mtaLl26BD8/P4NcGZxoTydtSomZmRlWr14NDw+Pae/pBGiaBB8+fBiXL19GXl4eMjIyUFtbi+3bt494rlCr1ejq6gKDwZjRNF6trfvChQtx/fp1PHz4ECRJgiAI5ObmoqioCGvXrkVsbKxeUlsdHR0REBCA6upqkCSJ/Pz8KTls2djYYNmyZbh27RoyMzNx5MiROW3eYsyoVCq0trbS7NKHnnu5XC4t3W8+2aVPFLVajfLycggEArS1tSE6OhqHDh0atYm5IUOSJB48eIDLly9DrVbD1dUVu3fvNtrXo62/MjE7zGmBJZFIIJPJjPbH4OnpCYFAgEePHqG7uxvu7u6zJq66u7uRm5uLR48e6dRXadNqEhMTjfa9nSnu3buHK1eujJoOOByDV/WYTKbOez5ZSJJEbm4uKisrsW/fvmmtCSFJEpmZmbh//z5tO5PJxPbt28e1wsdkMrF161Z8+umnVNpNX18fbty4gfXr10/bWMdCrVZDJBKNaU2u7ek0OMI0Wk+nkydPIiQkhIrUzCQcDgebN2+Gn58ffvjhB5SXl6O1tXVE2+Du7m4QBAF7e/tZqYGwsLDA5s2bwefzkZmZSZm6SCQSnD9/Hg8fPsTGjRuxYMGCGR/LUOLj41FdXQ0AyM/Px8qVK6cUiU9KSkJ+fj6EQiHy8/MRHx8/XUM1MQWkUimtfqq5uVnHLt3a2poWnXJ1dTUJ5DHo7u6GQCBAQUEBnJ2dwefzERYWZpTRKi1SqZQ6jwLAokWLkJKSYtSvqa6uDsHBwfoexrzBeL8p46C1tRXu7u5GG1mxsrLCuXPnEBUVBXd3dxw4cGBGxZXW4S4nJ2fYmiErKyskJCQgPj7eVF81DLdu3Ro2kjMWHA4H7u7uqKmpoZwAy8vLkZycDFtbW3C5XMjlcnR0dKC9vR1CoZCyOB8vXV1dOHr0KA4ePDgtTZ3VajV++OEHFBUV0baz2Wzs3r17Qil+bm5uSExMpNK0ACA3NxeRkZFTbqsw0Z5OQ63J/fz8aCl8PB5v3OeTjo4OlJaWgsVi0WqvZprw8HC4u7tTjS+/+OKLYRtfzlT91Vi4ubnh4MGDOrbura2t+Pvf/47IyEisW7duVs1OgoODYW1tTUUghzMYmggcDgfr16/H999/j2vXriEiImJeu8bpA5Ik0dfXR3P309YCDkZrl66NUtnZ2RntnGE2GRytEgqFiI6OxgsvvDAnFl0bGhpw6tQp9PX1wczMDNu2bZvS+cAQ0NZfzebC5XxnTgustrY2g+97NRL9/f04d+4c+vr64OjoOKPiSq1Wo7S0FDk5OWhpadG539XVlaqvMubVm5mCJEncvHkTN27cGPdj7OzsEBsbi4iICKogure3F1KpFARBoLq6GjExMSPm9svlctTX16OgoAAVFRXjqvPq6+ujRNZUUsIUCgVOnjyJyspK2nYzMzM8//zzk4pArFq1CiUlJdRkmyAIXLhwAS+99NKIq8fank7DOewNvj3Rnk7Txc2bN0GSJOLi4mbdGdHe3h4vvvgirl69inv37iE7Oxt1dXXYuXMnJeL1JbAAuq373bt3cefOHSqSUFxcTNm6L1myZFbOOSwWC3FxcZTbY15e3pQnVGFhYfD19UVdXR1u3ryJDRs2TMdQTYwAQRBob2+nCaqhae0sFotml+7t7W0SvhOkq6uLqq1ycXEBn89HaGjonJgbEASBO3fu4MaNGyAIAl5eXkhLS5sTNUsdHR3gcrmwtbXV91DmDcb/ixiF7u5uo7LP1KI1tOjp6YGbmxs2bdo0IxcBmUwGgUCA+/fvD1tfFRQUhKSkJPj5+ZlW9Ebh1q1b4xZXERERiI+PH7ZmzdzcHFKpFEwmE0wmE729vSMKLB6Ph+DgYAQHB0MikaCoqAi5ubljRra0360XX3xxUumCUqkU33zzDRobG2nbrayssH//fri6uk74mACowuFvvvkGJElCoVCgrKwM6enp8PPzG1dPJ61Y0tY5aUWVPiyAOzo6UFJSMuvRq8GwWCysX78evr6+OHv2LKqqqvDRRx8hNTUVfn5+ehVYWjgcDlatWoWYmBiarbtSqcS1a9fw6NGjWbN15/P5uHXrFkiSRG1tLTo7O6e0Gs9gMLBhwwZ88sknePDgAfh8vtE5pxkyCoUCzc3NNLv0oe6r5ubmNDHl4eExJ4TAbKNSqahoVXt7O2JiYvDiiy/OqRYsIpEIp0+fRk1NDQBg6dKlWLNmzZwx7TLVX80+c/5MY2y504PdAt3d3eHk5DTt4qqnpwf3799Hfn6+jlsdm81GdHQ0EhMTTZOBcVBdXY3r16+PuZ+9vT22bt0Kf3//EfcZ/DlzOBz09PTAy8trzGNbWFggMTERcXFxuH79OnJzc0etARsYGMDJkydx+PDhCf0++vv78fXXX+uk2Tg4OGD//v1jCrbRejpp/66urkZTUxM4HA64XC6am5uRmpoKV1fXMXs6GRLaiXpsbKzeVwyDg4Px6quv4tSpU6ivr8eXX36JFStWQCgUAtCvwNIy2NY9MzOTGpvW1t3f3x8bN26c0bHa2NggJCSEqrkQCARISUmZ0jHd3Nz
A5/Px8OFDZGVlGaXhkqEgEolodumtra06dun29va0+iknJyeDPUcYA11dXRAIBCgsLISrqyvi4+MREhIy50RqdXU1Tp8+DbFYDEtLS+zcuROBgYH6Hta0UldXh9DQUH0PY14xt34lRs5QcXXgwAF89dVX03JskiTR1NSEnJwclJWV6UzALS0tqfoqbQqRidGRy+U4d+7cmPstWbIEq1evHjOSMlRgDbUHHgsul4uUlBQsXLgQZ8+eHdXFsKWlBXfv3sXy5cvHdeyuri589dVXOhEyNzc37Nu3D2ZmZujp6ZlSTycbGxts2LABX375Jc3kg8FgYOXKlRN6L/RJZ2cnHj9+DBaLNe73d6axsbHBwYMHcfPmTSriWlxcjJCQEIOqmfDz88ORI0d0bN1rampmxdY9Pj6eElgFBQVYs2bNlCOgq1evxuPHj1FVVYXKysop9dmaL5Akia6uLlq639DzIYPBgIeHB83hbyJNvE0Mj0qlQllZGQQCATo6OhATE4OXXnppWmp3DQ21Wo3r16/jzp07ADTnn127ds2575G2/sqUpjy7mASWgTCcuJqOyJW2AW1OTg6ampp07ndxcUFSUhIiIyPn3KrUTDO4QH8kNm3ahISEhHEdb/DnzeVyJ2xkocXT0xMvvPACvvzyS7S1tY24340bNxASEgIXF5cR9yFJElVVVTh27Bj6+vogl8shl8uhUChgY2MDBwcHfPjhh7SeToPrnSba0wkAkpOTkZGRQd0uKSlBdHS00bgfaaNXMTExeo9eDYbJZGL16tXw8fHBV199he7ubjx+/BgNDQ0GNenXp617QEAA7O3t0dPTA6lUipKSEsTExEzpmJaWlli5ciWysrKQmZkJf3//OZN2NF2o1WqaXXpDQ8OwduleXl40u/TxnE9MjI/Ozk4qWuXm5oZFixYhNDR0zn5Xe3t7cerUKTQ2NoLBYGDVqlVYvny50WU9jYf29naYmZnNei3wfGdOz6g5HA5kMpm+hzEmo4krmUw2qRVUmUyGR48eITc3d1gREBgYiKSkJPj7+5tSKCZBXV0dBALBqPts2bJlQvbMQyNYkxVYgCZt8ODBg/jqq6+GNS4BNBG448ePY/v27bTI0+CIU2NjI+WEx+PxwOPxwOVyERkZiR07dsDe3n7aezotWrQIhYWFtHFfunQJvr6+Bt9/pqurC8XFxWAymQYTvRqKv78/tm7dioKCAjAYDBw/fhxLlizB2rVrDWoypbV1j4+PR0ZGxqzYujMYDPD5fGRnZwPQNNCeqsACgISEBAgEAnR2duLBgwdj9oeb68hkMirdr7GxEU1NTTp26VZWVrR0Pzc3tzk5+dUn2mjVw4cP0dXVhZiYGBw+fHhORqsGU15ejrNnz0Imk8HGxgapqanw8fHR97BmjLq6OlPDcz0wpwWWm5sbVbBoqIwmruRyOUQi0YQKSXt7e6n6qqEFv2w2G1FRUUhMTBw1amFibLRuYyOxevXqCfe+mS6BNbinU3x8PI4dO4aenh5a9Ekul4MkSfB4PCgUCspRb3BPp+bmZvT39yMxMZF2/NjYWGzdunXGJjuDe2NpU1l7e3tx8+ZNrFu3bkaec7rQRq/i4uIM2nlKIpEgKioKNjY2EIvFuHfvHurr65GWljatvdKmA1dX11m1dY+NjcX169ehVqvR1NSEtra2KZslsVgspKSk4Pjx47h58yaioqLmVSr2cHbpQ9PUnZ2daYLKZJc+c3R0dCA/Px+FhYVwd3fH4sWLERISYlALLDOBSqXC5cuX8eDBAwCa+tQdO3bM+bYztbW1iIiI0Pcw5h1zXmDdvXtX38MYkbHSAtva2uDi4jKuiay2vqq0tHTY+qpFixYhPj5+RFc6E+Ons7MTtbW1I97v7u4+Kee4wSd5Lper4+w4uKfTaEYRQ3s6RUREICcnB/b29uByuVQkisVigcFgwMfHB6mpqbTnEggEuHz5ss53aenSpUhOTp7xiY+7uzsSExORk5NDbcvJyUFUVNSknQpnmq6uLhQVFRl09EpLR0cHlRbj4uJC9cz65JNPDLLny2zaultaWiI8PBzFxcUANFGsLVu2TPk1BAUFISgoCJWVlbh27Rq2bt065WMaIoPt0rVRqqFZFCwWCx4eHjSHv7k+ydU3SqWSqq3q6upCbGwsXn75ZYNbUJkpurq6cPLkSbS1tYHFYmHdunVYvHjxnBfxJEmivr4emzdv1vdQ5h1zWmA5Ozujt7cXCoXC4FKLxlNz1draOurKKUEQKC8vR05Ojo5tNqB5/dr6Kn1YVc9VRksNZLFY2LFjx4RXApVKJRQKBXp7eyGXy9HV1YW+vj6cPHly1J5ONjY24+rp5OrqikePHg373BUVFejv74eNjQ1IksSdO3dw9epVnf3Wr1+PJUuWTOh1TYXVq1ejtLSU1hvr/PnzeOmllwzyojjYOdCQo1cAvQeWt7c3Xn31Vfzwww8oLy/H999/j0WLFiElJcXg6jIH27pfuXIFJSUlAOi27uvXr0doaOiUviPx8fGUwCoqKkJycjIIFgcDChUGFEoo1CQIkgSLwYAFhwVrLhvWPDY4YyyGpaSkoLq6Gvn5+Vi0aJFRthEZilKppNmlNzY26mRPmJmZ0aJTJrv02aOjowMCgQBFRUXw8PBAYmIigoOD53y0ajBFRUW4cOECFAoFHBwckJaWBg8PD30Pa1YQCoWwsLCYc8YdxsCcPsNpV8mqq6sRFham7+FQjNfQoqqqClFRUTrb5XI5VV81XBpZQEAAkpKSEBAQYJATUWNGqVSioKBgxPtjYmJoERaCICAWi4d11Bva00kmk6G2thZcLhckSUKlUiE0NHRaejolJyejqKho2IbEJEkiPz+fKsTPzc2l3c9kMrFt27ZpqUWZCFwuFxs3bsSJEyeobU1NTRAIBBNOv5xpuru7Db72SgtJkjo9sMzNzfHMM8/gwYMHuHz5MvLy8tDY2Ii0tDSDchnUYmdnh927d2PRokXIyMig2bp/9913U7Z1X7BggWaBTiyFrU8gLtcIwTMzBwMAMUwHBCYDIAFYcljws7WAm5UZWEzdc6+TkxMSEhKQm5uLzMxMHDx40OjO0WKxmGaX3tLSomOXrk011v5zdnY2utdpzCiVSpSWlkIgEKCnp2feRau0KBQKXLp0ibpmL1y4EFu3bp1X5iim+iv9MacFFgDExcVBIBBMu8AiSRJqEiChWcVkjvPiMV5x1dPTg5aWFjzzzDPUtr6+Pty/fx8CgUBnhZDFYlH1VYaaQjUXqKurg1Qq1Xz+arVOXVNAQABOnDhBCSexWEytHg0WSsP1dGpubsbnn38OQNOrSigUIjIyclrGbWlpibCwMDx+/HjY+x8/fozu7m4UFRXRtrPZbOzevRshISHTMo6JEhoaitDQUMo6GwCys7MREhJiUCtyt27dAkEQiI2NNfhJTF9fH5RKJaysrGjnHgaDgcWLF8Pb2xvp6eloa2vDp59+is2bNyM6OlqPIx4ZX19fHDlyBAKBANeuXdOxdV+0aBFWrVo1YUdWiVIN/xUp6JVr0hClEgmsLC
wAjHyeJ0kSEqUajzsHUNolgq+tBQLsLXSuDStXrkRRURHq6upQVlZmcOmYgyFJEt3d3bT6qa6uLto+DAYD7u7utHQ/k1uZfmhvb4dAIEBxcTE8PDyQlJQ076JVWoRCIU6ePInOzk5wOBxs3LgRsbGx807o19bWTts8wsTEmPMCKyIiAllZWejp6ZnyxEekUKFVJEOXVIkBhRJqAmAwAJIEeGwm7HhsOFvw4GbFA3uYVJGJWLELBAJER0eDw+GgubmZqq8aulJoYWGBRYsWYdGiRab6qmlCpVKNWN+Ul5eHwsJCyOVyMBgMylWPx+PB09MTwcHBNOFkZWU17ovbUJMLsVg8ra8rPj5+WIGl7QUSHh5OGyuPx8Pzzz+vd3eljRs3oqamhmqKLZPJkJWVhbS0NL2OS4tWmDKZTKxYsULfwxmTodGroXh4eODIkSM4f/48Hj9+jDNnzqC2thabNm0yuFRrQBNhXbRoESIiInRs3e/fv4/i4uJx27qTJIn6PimedIsAcysQog7NYgoAhVIJLmfk189gMMB+OnkjSBI1vWK0iWSIcrWBLe/HyLO5uTnWrFmDCxcu4PLlywgKCjKYFG6tXfrgCNXQ8xCHw6HZpXt5ec2riIChoY1WPXz4EL29vYiNjcUrr7xi8GnKMwVJklRjb5VKBRcXF6Slpc1LYy+CINDQ0DBn6z0NnTkvsNhsNmJiYpCXl4f169dP6hjdUgWedIvQJ1eBJEkwn0asuCzNxZQkSagIEu1iBdolCpR2DcDL2hyB9pbgsjQX9ImIK6VSifz8fCxbtgx///vf0dDQoLOPk5MTkpKSEBUVZTAXZ0OHJEmIxeJRU/UGBgZG7Onk5uaG5uZmEAQBLperU0Owfv16LF68eNLjG05gkSQ5bStuPj4+sLKygkgkorYplUoUFxejv78fIpGI6ttkZWWFffv2GUSNiK2tLdasWYPMzExq2+PHjxETE4PAwEA9jkzD7du3QRAEYmJiDD56BYwtsACNuE5NTYW/vz8yMjJQUFCApqYm7N6922Aj5FO1dVcSBPLb+tArU4LFYIDNZsPcwhwSsaYfk0QsBtdufAJTe42QqNTIbe5BmKMVFtj+aOIQFxeHvLw8CIVC5OTk6E2Yy2QyNDU1UWKqubmZ1uQboNule3t7w83NbV5GRAwNbbSqqKgIXl5eWLp0KYKDg+e1lb1MJsO5c+dQWloKAODz+diwYcO8nSMJhUJYWVmZFt/1xJwXWACwePFifPLJJ4iJiZnQKoaKIFHRJULTgCbthM1ggDHMyYvBYIAFgMX6cfWysV+KVpEMkc42MFPLxy2u5HI5PvnkE5SVlek0WgQ0PWySkpIQGBg470LdoyGXy8cUTiKRCDweT0c4eXp6Un+P1dPp6tWrI7pdTbVo1szMjPqbxWJBrVZDJpNNS8Np4MdUnsrKSgCa96yoqIhaodYKLHt7e+zfv9+geqEkJCSgsLAQra2t1LaLFy/i9ddf1+vFs6enB4WFhUYTvQLGJ7AAzfclLi4OXl5eOHnyJDo6OvDZZ59hw4YN4PP5Bnv+0dq6l5WVISsra1y27ko1gQetvRiQq8BhMqjXZmlhQQksqVQKGxvbCU1gOUwmCJJEaZcIKoKEv73Gmp3JZGLDhg04duwYbt++jZiYmFlJq+vv76el+wmFQh2nUCcnJ1r9lL29vcF+1vMNpVKJkpISCAQC9Pb2Ii4uDkeOHJm30arBNDU1IT09Hb29veDxeNi6dSsWLlyo72HpldraWvj6+up7GPOWeSGwtCvgP/zwA1566aVxXSDlKjUetPZColRrhNUELjDa1Us1QULQ2oPWkkdjiqu+vj48ePAA165d0yniZ7FYiIyMRGJiokFEFGaTwT2dRrMmJwhCRzjZ29tTtU7af1NxrpLJZMM2bdYy1c+GyWTCzMyMao7N4XDQ3d0NT0/PKR13MG5ubqisrIREIkFRURGtEbdIJIKbmxv27dtncCte2t5Yn332GTUh7Onpwc2bN5GcnKy3cWmjV9HR0QYlSEdjvAJLi4uLC1555RVkZGQgPz8fFy5cQG1tLbZu3UpbFDAkGAwGwsPDERQUNKatO5PFgqCtD6Ih4goAOBwuOFwOlAolSBKQSCWwspzYb4PJYIANoLJHDA6LCW8bzfnfz88P4eHhKC0tRXZ2Nnbt2jVtrx/QROy1dunaf6PZpXt7e8Pb23te9ecyFoRCIVVb5e3tjWXLliEoKGheR6u0kCSJe/fu4erVqyAIAh4eHkhLSzOa8/FMUldXZ7D1s/OBeSGwAE2ouLS0FPfu3RuzR5FCTeB+Sy+kSjU4rKmcwAj0dXfD3NMffmw29qxZpiOuWlpakJOTg5KSEqhUKhQWFiIwMBBcLhfm5uZUfZUhFfRPByP1dBoqoob2dNKKKCcnJ9ptHo8346us2gL64bC2tp6Wyaa5ufmMCiwXFxcMDAygqKhIJxXIzs4Ohw4dMthJs4eHBxISEnD//n1q27179xAVFaWX/Pre3l4UFBSAwWAYTfSKJEl0dnYCGL/AAjTfxW3btsHPzw/nz59HSUkJWlpakJaWNq3fz+lmLFv3/Px8xKdshYhrrSOutFhaWKJX0QtAkyZoZWmJ0cwuhkNrdFHWOQAHcw4sOZpL77p16/DkyRMUFRUhISEBXl5ek36tSqUSLS0tNLv0wQsogCZK7u3tTbNLn6/pU4aOQqGgolX9/f2Ii4vDq6++SqVxm9A4Wp45cwZVVVUAgKSkJCQnJ5tSWPFj/dX27dv1PZR5y7wRWAwGA9u2bcNnn30GZ2fnEV3RSJJEUXs/JCoVuFP4kaoJNbo6O6FSqcHhMuESGgURyYQ5NF/8J0+eICcnB/X19cDTbWVlZZTbW2JiIqKjow2yqHwslErlmMJppJ5Obm5uCAoKGrWnkyEyXSd0c3Nz9PT0APhRYE0nra2tKCgo0LFrd3R0xPLlyw1WXGlZs2YNysrKqCbM2t5YL7744qynMQ2OXjk6Os7qc08WkUgEmUwGCwuLSTV2jYyMhIeHB9LT06mUu+TkZCQmJhp0GtlItu5SAmgUKcBm9sLW1hpstq7YMDc3Q38/AwRBQqVSQ65QgMeduKkDk8GA+un1JdFDk3Znb2+PpKQk3L59GxkZGTh8+PC430eJREITUy0tLTq/azs7O5qgcnFxMejPyQTQ1tYGgUCAx48fw9vbG8uXLzdFq4ahtrYWp0+fxsDAAMzNzbFz504EBwfre1gGQ1tbGzWHMqEf5o3AAjQXm+effx7ffPMNtmzZMqx1e6tIhk6pAhzG5E9mdHHFgaOjAwiSgUJhH6w66vEgN4c2cdaKKwcHB7z11lsIDw83yIvgRHo6DU7Ls7GxgY2NDVXrNNWeTnOZwRFOLpc7bJ+zyVJWVobz58/rTMLc3NwQEhJiFI0/eTweNm7ciO+++47a1tjYiPz8fPD5/FkbR29vLx49emRU0Svgx/RAJyenSZ9jHB0d8dJLL+HKlSu4f/8+srKyUFtbix07dkxKtM0mQ23dXWOTQAKQy2Xo6JDBwtIS1tbWYA46/zMYT
JhbWEAs0tQqSsTiSQksQFPH2ydXoWlACm8bzXu1fPlyFBQUoLm5GUVFRcOm9JAkiZ6eHlq6nzYS+eM4GXBzc6MZUpiiHcaBQqHA48ePIRAIMDAwYIpWjQJBELh58ybV2N3Hxwepqamm1gBDMNVf6R/Dn1FNM56enti3bx+OHz8OkiRpPUjUBInSThFYmFjN1WB0xZWjpj+KWASZQoWKuhadqIRYLMbq1avxyiuv6GWSS5Ik5HL5mMJptJ5OgyNR5ubmBikQjYGhToLaaNZUyc/PH1ZceXt7w9/f36g+r9DQUISEhKCiooLaduXKFYSEhMxa7Zg2ehUVFWU00Stg4vVXI8Fms7Fx40b4+vrihx9+wJMnT/Dxxx8jLS1tRJc+Q0Fr6+4dHIp7DZ2QiAYAaNptiEVijZmFtfVTsfij2YVWYMlkMqgJNVjMiUetGQwGmCRQ0yuBl7XmPMnlcpGcnIwzZ84gOzsbYWFhYLFYaGtro6JTDQ0NNPdP4Ee7dG2EysvLy+Aj0CboDI5WLViwACtXrkRgYKApWjUC/f39OHXqFOrr68FgMLBy5UqsXLnS9H4NQ11dHWJjY/U9jHnNvBNYAODu7k6JrKamJqxatQpcLhdCiRxqkgRnkj/WoeLKxsYG/X19kEql6G5vx61L57B+97NgsFjgcTjw9/dHa2srwsLCkJaWNiN5w6P1dBp8m8lk6ggnJycn+Pv7U9sn0tNpLjJaxE0buZuqQB4qsLSpcJOFJEncvXsX2dnZAECryfD396dNho0hggVoJqmbNm1CbW2tTm+s1NTUGX/+vr4+o6u90jJdAktLWFgY3N3dkZ6ejqamJhw9ehSrVq3CsmXLDH7S0yYjYGlpCUtzM/T39UEu13yXCDWB3t4+iMUS2NragMvlgc3mgMvjQiFXaMwuJBJYW02uLpbFAGQqAj0yJRzMNSngISEh4HA4KCoqwr/8y7/AxsZGp0bS0tKS5u5nsks3TgZHq0QiEeLi4vDaa6+ZIjBj8OTJE5w9exYSiQRWVlZITU2Fn5+fvodlkGjrr3bu3KnvocxrjGNGNQO4ubnh1VdfRVZWFj766CNs2bIFQp79BEuXf0Qrrn736kGUPxLg0yt3oVRoLpDigX789V9/jR0vHIGNgxMCNmyFoqsNxcXFSE5ORkxMzIQjCKP1dBr892g9nQan8ZkaRY6NpaUlzeVvMARBoL29fcpW7UNTBEdzLRwLkiRx+fJl5OTkUNsGBjSr9SEhIXB3d6ftP12T7tnA1tYWq1evRlZWFrWtuLgYMTExCAgImNHnvn37NtRqNSIjI+Hk5DSjzzXdTLfAAjSp1y+88AKuX7+OO3fu4Nq1a6irq8OuXbsMzo1Si1JNQCiWaxxi2ZpMA5lMhr7+fqhVmiivUqlEZ2cXzM3NNbUMFpZQPBVhErEY1lZWmKjZBaBZICDUahTUNkFeX4GGhga0tbWhr68P9fX1aGxsxKJFi2jNfBcsWAAHBwejijSboNPa2kpFq3x9fbFq1SoEBAQY/EKEvlGr1bhy5Qpyc3MBAIGBgdi5c6eptmgUWlpaYGdnZ/Ap23OdeSuwAM2EedeuXaisrMSFi5fgtXoLLHg8sLkcqFVqsMa5oq8m1Ojs6IBMJsOmvS9izfbd4HB+FCyW1jb4/d+/gaWFBdQk0NjYAouBAbz22mvDTkBG6uk0+O/RejqFhoaOq6eTiYmh7SNVW1s77P1tbW3TKrCmEsFSq9U4d+4cCgsLadslEgkWLlw4rDAYKrgMncWLF6OwsBBtbW3UtosXL+K1116bsfq+vr4+o6y90jITAgvQmLwkJyfD19cXZ86cQU1NDT766CPs2rVrxgXvZBhQqMCA5jctk0rw3Sd/hauXN1JSn4VILIJoQES1A5BKpZDJZLCysgSTqTG7UKsJyORymPE0KXmDj7Eh7bkhz0ZCpVJBoVBAoVBArlCAIEiQhBpVTx0xmUwmwsPDYW1tja6uLoSHh+PgwYOz+I6YmAnkcjkVrRKLxeDz+Xj99ddN0apx0t3djfT0dLS0tIDJZGLt2rVYsmSJaU4zBnV1dab6KwNgXgqszz77DO+88w5aWloQERGBP/3pT2ByuUiL8kdicgoaq6vg7R+IHQdfwl/+5dfoaG2FuaUl4pauwC/++8+wsLJCV3sbPvz9v+DR3VuQy2RYsXUnUl9+A5e+/jvKCwT48OINWNvZ4eLxo7h65iQG+nrhFxqOl/7x3xAcGorMcyexZ88ebNy4EY8ePYJYLMbu3bsRGBg4Kz2dTEyO0QRWZWUl4uLipnT8wStOHA6HijhNBKVSiZMnT+LJkye07Wq1GkFBQSM2pTQ2gaXtjfX5559Tk+Hu7m7cvn0ba9asmZHnvHPnDtRqNRYuXGhUET9AU+spkUiohZmZIDAwEK+++ipOnz6N2tpafP3111i2bBlWr15tUCv1/XIliKd/y6VSfPne/yJ68RJsSHsO1lbWsDC3QH9/P9WagSRJDAyIIFfIwWIywWazIRGLKYE1+Bgpac9C+VRMaf8RBL2ZL5PJAM/SCqvWJsPX2wuenp7Ugsr777+P2tpa1NbWmlKgjJSWlhYIBAKUlJTA19cXq1evNkWrJsjjx49x/vx5yOVy2NnZIS0tbUptDOYTdXV1s2r6ZGJ45t0M/dq1a3jllVeQlJSEX/7yl/i3f/s3bNu2DT/55a8BAMX3c7D/rV/AxtEZMrkca3fugYWVNZpqKnHxmy/hGxyK/W/+Am//7DUU5NzByu2pcPH0hkImh1qtpkwEBkQDuHc1Eyc+fBcLE5Kwansazh39FH/42av4w3fncO/ePQBAbm4u+Hw+rl+/jrNnz+J3v/sd2Gw2GAwGFAoFurq60NXVpbNiM9pt074zt29dXR0qKysxHNXV1WCz2bTUhYmOoampCTU1NQA0ufodHR24evXquMerUCiQnZ2N9vZ22n1mZmZwcXFBX1+fTtohg8GAhYUFFZmZyHgNYV9HR0c8fvyYun3q1CkwmUydlKqpjkEkEiErKwsEQcDT05My2TCU92GsfRsbG9Hf3w8PDw+0trbO6Bg2btyI3Nxc3L17F1lZWSgpKcG2bduolXt9v2cdYhlAkiBIAq9tWwcAKLx/D2v9nPHsa29BpVDg+oWzkEnEiIhPxN6f/QZgMvAfh/cBAP7fx8dwL+sCTn78F/z6//6Ko3/6H+oYyX4u2Pniq9i6/yV899F7uH81Cwq5DOH8BLz2L7+Hp48vOGw2VCSJqGBvqg4LAGxsbLB8+XJcu3YNmZmZOHLkiGlSbiTI5XIUFxdDIBBAKpUiLi4Ob7zxxpzrYTnTKJVKZGZmQiAQAADCw8Oxbds2k4HLOFGr1WhsbJyVemQTozPvBNalS5cAAL/73e+wbt06NDQ04O233wb3aQ3Shj3PY/fh1wEAj+7dQvbp79DaUE89vryoAHU11SjMvQv/sAhsOfAyCJIEA4BYIgFBatZFmQwGSvM06R/7fvYbeCzwQVP1E+RcyUBnSzNsbe0AAEuXLgWfz8ejR4/Q3d2tY71rwrBQKBRoaWmhIiZDOX/+
/JRC8319fWhoaADwY13XrVu3xpUSIZfLUVRUBLFYTNtuZmaGyMhI3Lt3T6dwXou7uztlhGFsqFQq1NTUUIYXANDU1DSp2sbRqKysRHNzM5ydnWm1X8ZCc3MzKisr0dLSouNIN1PI5XKUlpbiwYMHOHfuHEJDQ2e0bo0gCKjVaqhUKqhUKurvodvid+2FtaMLVAoFthx4CX/773+H2wJfbNp7CMKmBlz86u9Ytmk7bBwccDX9BGQyGQ7++l+w5yc/x8f/+mt8+af/xuP79xCdtBwRi5cg9eU38NHvfgsPX3/seOEV+AWHIuPbo8j6/mus27UHPoHB+Pv/vY13f/tzvPv9eQCaqJhCTei8hqSkJOTn50MoFGoaIcfHz9j7ZWJqkCRJ1VaVlJTAz88Pa9euhb+/v0kYT4L29nakp6ejvb0dbDYbKSkpiI+PN6UEToCWlhbY29vTyg1M6Id5J7C0aH+w1P+fbndycaP2+dv/vo22xgb88g/vwtzCEr9/82UwGYCzswv1WBabDahUYDC01u6aI6nVamoSThIESJIcdJJgANDcp/0RMJnMESftJgwHLpcLJycnqpZlKM3NzVS6z2TgcDhgMlmwdnQCm8MFyTUHk8MDqVKM+jipVIrCwkIdAw5LS0tERUWhvb19RHEFYMq1Y/qEzWYjKCgIJSUl1La+vj60tbVNW9qjXC5HS0sLABhtbrtEIgGAWS0Ot7OzQ3x8PMrLy9Hd3Y3Hjx/Dy8tLZwJKkuSogmi82whCV7AMR5RCqXmcWoWQWI2AsbazQ/yqZPzPm4cBAHcu/UDtX1EgAMCAf0QUlmzciruXzsHC2hq7X/8ZpDIZYpdp6vEcnV2wc/8LYDKY+OztfwOTycTP3/4TuDwecq5m4fHD+5CKRTC31NTeDjdaDoeD9evX4/vvv8e1a9cQERFhmiwZGEOjVXw+3xStmgIkSeLRo0fIyMiAUqmEk5MT0tLS4ObmNvaDTdAw1V8ZDvNOYG3atAl//OMf8W//9m+orq7G3/72N9jb20OtGH4CS5Ik+rq78eDG1adbGLC0tkZ04lIU5NxB5td/h4evH0T9/Vi9c8+Pgo3JRMSiRDy6cwMnPvgzFiYkIv/2Dbh6ecPbxxe+vr64efMmkpOTsXXrVpw4cQIdHR145ZVXdJ5/uL/Hum3ad+b2jYmJwYULFzAS1tbWWLly5YSOSzCYkHMtIWGbY+HO/QBJala4FQo4OjmBxQDYShl40n6wlTKNRH96nK6uLly5cgWurq6047q4uGDt2rWQSqU4f/78iP2JHB0dsWHDhgm/D4a0L0mS4HK5tOiiWCxGUFAQzSFzsmMoKCiAg4MDvLy8qDo7Q3wfRtu3vr4e1tbW8PX1hYuLy7Q8J0EQlMBRqVRQKpXD3nZxcYFSqURTUxO6u7tRUVEBNzc3MJlMShyNtsA0nvtYLNawtuXDPZYkCDAYoIwutGiXyJgsFl7/j3dAkiTUBAFCrQZAgslgYOBpbzqFXA6pWAwuhwNSpQKgWShjjtCknsFg0MbCADBSjCMsLAy+vr6oq6vDzZs3ab9PE/qBJEmqtqq0tBT+/v5ITk42uj6ChoZcLsf58+epNO+YmBhs2rQJXC53jEeaGI66ujokJCToexgmMA8F1po1a/Dpp5/inXfewc9//nOEh4fjz3/+M8qeGgKQ+PEC+NKv/hnv/OpNfPvRe9jz8uu4lXGeuu+f3v0IH/zH/0PO5UtQKORYl/osbKxtwH56gXdwdETyzt0Q9/Xi5oWzKH/0ED4hYTj4y39Gr7AFdXV1ADSWz1ZWVpDL5QBgCu0aASRJorGxEV1dXcPeL5PJ4ODggJCQkDGPpSJIVHaL0NCvKaY3BzAgfeqKx2CAVKtgzuOBw+FARVhCbesIDpuJhc42cDTnoq6uDjk5OTorfcHBwdi9ezfYbDa++OKLUZu/btu2bcrmHIbA2rVr8cEHH9AidRYWFlPuBTIwMICqqipERkbitdde0xGyxoLWhfQnP/kJ7OzsoFKpIJfLIZPJIJfLaf/Gu00ul+sIGCaTCS6XqzNBcnNzQ1BQEEpLSyGTySCRSBASEqIj9mYDW0tzWFhag+AqYGZmBgaTic7WFhTevYW45atR/6QcD65mITg6Ds11NegStmLhokTk37qOopzbWLk9FQ+uXsbxP/8PfvGnD2FuYQ4mk4nm+lpkn01HZPxiLF69Dk+KC/Hev/wKXv6BKM1/iKiEJCp6BQBc1shibMOGDfjkk0/w4MEDxMfHG11LgLmCTCajolVyuRx8Ph8/+clPDLYFgTHR0tKC9PR0dHd3g8vlYsuWLYiKitL3sIwWbf1VWlqavodiAgCDNOWlUVyv74SKIMGawGoUSZLo7u6imlRq4fG4sLe3h1yugEwmhUwuB6EmwORwUH7nGhqLHoLL5VINfAevgDk6OsLT0xNeXhp3KVdXV5NjoIEhEAhw/vz5Ee83MzPDgQMHRk2965Mp8UjYB7ma0PTjefodaGtrpVzHpFIJPDw8YG6ucRckSRJqkgQBwEIhxq1T30KlpH/3oqOjsW3bNjCZTFy8eBEPHz4ccQzW1tZ46623ZszWfLa5e/curly5Qtt24MAB+Pv7T/qYmZmZyM3NRXh4OPbs2TPVIU4ZtVo9LgE0+PbAwADOnz8PkiSxfPnyp85240unm25UKhUqKiqoNFt3d3cEBgZOS9NcJpMJHo9H/TMzM6Pd1m5TWdljwMwWbIbmMV/833/j3PEvIJNI8Kv//QvKiwpwJ/MCRP19cHJ1x7LN2xG/Khm/f2U/7J1d8Ov3PsH9q5fx9Z/+G9sOvYKNzx/EhWOf4erp7yGTSvCPf/oQKzdtw2d/+A/cuHAWcpkM0YuX4M3f/Q9cPDxBkiSUJIm1Pk7gjCCyAE1Np0AgQFBQEPbu3Tvl98fE+CBJEs3NzRAIBCgrK4O/vz/4fL4pWjVNkCSJ+/fv48qVK1Cr1XBzc8Pu3bvh6Oio76EZNQ0NDcjIyMCRI0f0PRQTMAksGlXdYlT3isGZYHEqSRLo6u6mmlBq4ZnxNE5mYIAECZlcDrlCiZqr5yHq7QZJkpDJZCBJEra2tuBwOJDL5ZQToRYWiwV3d3d4enpS/0xNJ/ULQRA4evQoZUgxHGZmZti/fz88PT117uuWKvCwtRcAwB7yfRO2C6lmpzKZFM4uLrC2ouf2iyUSiCRSSDrb0PTgJsink+UlS5Zg3TqNK9r58+eRn58/6ut49tlnERoaOvqLNSLUajU+/fRTCIVCapujoyNee+21SS1SDAwM4L333oNKpZpy9IogiElFiYZuUz1NR5sI2v5dVlZWBmGaoE23qq6uBkEQsLS0RExMDBwdHXVE0UgiabhtWgfWsdD+/ob+9ghCjc7OTgwMDEA15DyshQHAzMwcLBZTs3BGECAIAiwWCxbmFnBycgSXO3rjdjVJgs1kYLXP6FEpsViM999/HzKZDHv37kVQUNCYr83E5JHJZCgqKoJAIIBCoQCfz0dMTIwpWjWNSCQSnD17lmojkpCQgPXr15sWkaeBW7duQSqVIiUlRd9DMYF
5mCI4Gl42ZqjuFQ8xpBgbBoMJRwcHdHV1QaH4MT1JLpOjp7sH9g72YIABFocLH1srbH/zDdTX16OsrAxlZWUQiURQKpVQKpWwsLCAm5sbbGxsQBAEWlpa0NnZiaamJjQ1NVHHNjc3p8SWNtJl6to9ezCZTGzfvh0fffTRiBNemUyGL7/8kupvpmVAoYKgrRcAA2ym7veMyWRCDc3kjsFgQj3k+CKRiGpAbOHkCve4JWh5eAfJyclYunQpFAoFLl68iKKiolFfQ2Rk5JwSV4BmMWLr1q3429/+RqtRu337NlavXj3h4929exdKpRJBQUEwNzdHe3v7pNPpFCPUec4GM2lwweFwxiWChtunr68PFy5cQG9vL7hcLlavXj3t7o/DYcPTXPoGn+vVahXa29shEolAjLDuyGQyYW6mSQfkmfHg7OyMzs5OiMRiqNVqyOUydHd3w9nZGSzWyJdXgiThaD52jYmlpSVWrlyJrKwsZGZmwt/ff1oifSZ+ZGi0KiAgACkpKfDz8zMtYk4z9fX1OHXqFPr7+2FmZobt27cjLCxM38OaM9TV1SExMVHfwzDxFFMEawilnQNo7JdOOIoFAARJoKuzS8etzdzcDLZ2dlCTQJKnPWx4P6ZjkSSJpqYmlJWVobS0FL29vYMeZ46QkBD4+/uDx+Ohvb0dTU1NaG5uHtZm2d7enia43Nzc5kzql6GSk5MzLsvu6OhopKSkwMzcHDlNPRApVSN+x7q6uqiaPIVCAStrKzg5OgEg0d/fD5GIbsPO4nLhxVFjeXQEnjx5gosXL+r0uhqKpaUl3njjjTkryi9evIgHDx5QJgwkSWLv3r2wtLQctyjq6+vDtWvXoFQqER8fb9Sr2FVVVWhqaoKfnx98fHwAaNwXJyOKhm6bqh21dkGgsLAQABAVFYXNmzfTzElmgqL2PrSI5OAymVCplGhra4NEIsFIF0Q2m62p1wID5uZmsLPXLJzJZFJ0dHRCItWIWHMzM5iZm8PZyQmMYQwvSJKEiiSR4GEPe7Oxz89qtRoffvghurq6kJKSgqSkpKm8bBNPGRytUiqVVLRqNl025wsEQeD27du4ceMGSJKEt7c3UlNTR2x6b2LiqFQqylvA1DPMMDAJrCGoCBJ3GrugUBM66SPjgSDU6OzqgkpJjzpYWFljoacTgh1HtnElSRJtbW1UZGuwFTiXy0VQUBDCwsIQGBgIuVxOia3m5ma0tLToCDsmkwk3NzdaaqGTk5NpVW4aIQgC33zzDaqqqsbc18LCAgnrt0Bq7Qgukzni59DT0w2pVGO3rlQqYW6uaRLc29sLiURK25fBAGzt7cFmsSEqzsXjosIxx8FkMvH888/TomqGhtaAYaKRIu1tkUiE27dvU0IV0FiGR0dHj/v7rxUlTk5OWLhw4Uy91AkzVp3RcKIoIyMDTU1N2L17NyIjI8Hj8QwuJaegoAAXL16EUqmEo6Mjdu/ePaM2zX1yJXKbe0AolWhtbYVcIR9xXx5l2sGAhaUF7Gxt8WNzD0AkGkBXVxdkcjkY0PzWLSws4ODgQNsPAFQEAXMOC8u8xp/mXVlZiePHj8PMzAxvvvmmSQRMEu2CpkAgQHl5OQIDA8Hn8+Hr62u6Ls4QAwMDOH36NGpra8FgMLBs2TKsWrXKFImdZurr65GVlaXjRG1Cf5gE1jD0ypR40NKjSesbJoVrLNSEGl2dnVA9raNhcbmQ9XTBQdqNbVu3jPtE3tnZSYktbQ8eQLOS6u/vj7CwMISEhMDCwoJqSqsVXE1NTejo6NBx+OLxeDqphca8Mm8IKJVKfPvtt6ipqRl9RwYDget3gsliwYzLg4WFOTVpG0xfXy/EYs1quEqlApvNgpmZuU6PKwYDMLewgEqphBoMCB8L0FtXOeoQmEwm0tLSEB4ePuHXOR7GqjMabzrdZOqMhtLe3o7S0lLattDQ0HFN2hUKBXJzc0EQBPh8/rT0t2EwGOByuZOOFE20zmgwf/7zn9HX14c333zToAvJOzo6kJ6eDqFQCBaLhZSUFCxatGhGJr8kSeJyeT1au/sgFQ/feJkBTSaBNt3PysoKNjbWGPqbBUj09PSgp6cXSpUSTAYDFhaWsLa2ho2NDe05VSSJhc7W8LSemFvs8ePHUVlZCT6fj61bt07osfMdqVRKRavUajX4fD6io6NNQnWGqaqqwpkzZyAWi2FpaYldu3YhICBA38Oak9y8eRMKhYKqwTahf0wCawQ6JQrkt/UC0DUhGA9qtRqdXZ0AkwVZXw8ac66BUCmRkJCAjRs3TnjC0Nvbi/LycpSVlaGhoYESTkwmEz4+PggPD0doaChtIiiXy9Ha2koJrubmZqp2ZzC2trY0weXu7m7qQTFBlEolvvvuu1EjWZauHvCMXw5iUKSRxWKCw+GAzeGAw2aDzeFAIpEg47vj+OztfwMAJK7biNf/7W2qmF79tHE1j8ejeu4wWCwQSgWqs38Y9rk1z8XC7t27h6270vbcmoplt0wmG7WZ8WxDkiSKi4vR3d1NbeNwOEhISBgzdba6uhqNjY1wdHREZGTksHVGExFFPB4PXC5XL6vkCoUCb7/9NlgsFv75n/95yil9M41SqURWVhblfhkWFobt27dPe9rL9evX8eHnf8eGV34GlVIJghhiLsRkwszcnPqN2dhYw8pq9AyEzs4O9PX1QU0QYLNYMDc3f9p6Q5OKqyQI2JlxkOBuN+HvQmdnJz788EOQJIkjR46YmrCOgbadhkAgQEVFhSlaNYuo1Wpcu3YNd+/eBQD4+/tj165dpsXcGeTo0aNYunSpyQjHgDAJrFHokylRIOyDVEWAw2RM6KSsIkio1CoIq8rR8PAuSPWPK/JLly5FcnLypE/yIpEIFRUVKC0tRW1tLc1u2dvbG2FhYQgLC4O9vb3OYwcGBmiphc3NzTrF9wwGAy4uLpTg8vT0hLOzs8FPzPSNSqXC999/T7kjDcVlIR/2vsFQj5KKBAAKpQLN9bWoLSvBF//zOyxasx4v/OZfqfuZDAbMzS2efh6any9JkmByuCg+9y2kA/1Uo1e1Wk1FgxISEuDs7DysSFIoFKM2czVWlEolBAIBGAwGWCwW2Gw2AgICsGzZshFFkVqtxpdffgmSJPHyyy/Dz8/PqL/7LS0t+PTTT+Hq6orXXntN38MZNyUlJTh37hzkcjns7OyQlpYGLy+vaTn28ePH8be//Q0EQSBi2WrwU7ZDLv2xtpHztN4K0HQjtrO1hYXF2NEONaFGu1CIAZFIswjyNGLp6OQEFpsDEiSWeznCnDO59ChtywBfX18cPHjQJBSGQSqVorCwEAKBgIpAm6JVs0dvby/S09PR1NQEJpOJ1atXY+nSpUZ9DjV0tPVXv/jFL2a8dtXE+DGsJHwDw9aMg2XejniibQRLkmCCASYDw17YtOkfgKaBZJybAxgOUfiiREAzpbh79y7YbPakXM0ATZoKn88Hn8+HVCrFkydPUFZWhqqqKjQ2NqKxsRGXL1+Gm5sbJbacnZ3BYDBgbW1NbQM0KV2dnZ201ML29nYIhUIIhUIIBALN6+
Fy4eHhQUsvHJz6YkKTuvnss8/izp07uHXrlk6am4WjCwj12KlvDDDg6OoOG3tNKhdBEFAoFGA+7ZXF5HAglUpAkiStIJ/DU6GpsxtN5SW041lZWSEkJATt7e1ob2+f8uucDQbXGU00UjT4H5vNxp07d5CdnU07fmxsLHx9fYd97itXroDH4yEkJGROpLNoazmdnZ31PJKJERERAQ8PD5w8eRItLS34+9//jrVr12LJkiWTFhYqlQp/+tOfcOnSJWpb6b2b8AgKg7t/MORSMcyeNvYGGGAwADu78Td/ZzFZcHJygkqthkQigVyhAJPJQk9PD+wcHBHjajdpcQUAK1euRFFREerq6lBWVjZjqb7GxtBoVVBQEDZv3gwfHx+TCJ1FSktLce7cOchkMtja2iI1NXXUJvcmpoempiY4OzubxJWBYYpgjRO5mkDzgBSNfVJI1QS0azEk6Nn49mYc+NpZwMn8x3Sg9vZ2HD16lLJK1pKcnIxly5ZN2xgVCgWqqqpQVlaGJ0+e0Ar8HR0dKWHl4eEx6kVH+bToe3Bq4WB3Qy3W1ta01EIPDw/TD/wpnZ2dOH/+POrr66ltwZv2gFCrgTF+ciqVChKJGAqFAv+wPRn8Vcl49s1fgcnU1PCwWCxNAdYQzCytkHfxDErv3QCgESm+vr7w8vKatdVDbZ3RVETRZOuMRkKtVuOTTz6hicuRemOJxWK8++67UCqVeOWVV0ZtFG0sZGdn486dO1i1ahVWrVql7+FMGLVajezsbOTk5AAAgoKCsGPHjglHJPr6+vAf//Ef1KLRYNgcLlJeegMe/kEgny6MMBgMODjYg8ebeGqiVCqBUNgOuUIOFosFKxtbKJqq8fzm9VNOv3748CEuXLgAOzs7vPHGG/PaKVYikVC1VSRJUtGqueqOaqioVCpkZWUhLy8PABASEoIdO3aMe2HCxNS4ceMGVCoVkpOT9T0UE4MwRbDGCY/FhL+dJfztLKEkCIgUKshVBAgALAYDlhwWLDmsYSeFLi4u2L9/P44dO0YzKsjOzgabzZ62vgVcLhfh4eEIDw+HSqVCbW0tysrKUF5ejq6uLty5cwd37tyBra0tJba8vb11Jt8cDgcLFiygrTyJRCJaWmFzczMGBgZQXl6O8vJyAJoJiZOTEy210MXFZV66BTk5OeHQoUPIz8/H1atXIZFIwGAygTHNG0iQJAEWiw0WSz14K0gwoFAoqHQ3JpMFpjZ19Wl0i/V0suXg4ED1bhovWgMGY6wzGo3BvbG0aH8PQwVHTk4OlEolgoOD54S4Aow3gqVFa3bh6+uLs2fPorKyEh9//DFSU1NHjEIOpa6uDv/xH/8xohGNuRkP3cUPYG9jA0sXd4BQw97ObsyGwSNhbm4Be0cH9PcPQKVS4dbJr+DEZeKsUoLdu3dP6TcSFxeHvLw8CIVC5OTkYMWKFZM+ljFCkiQaGhogEAjw5MkTBAcHY8uWLViwYIHBnXvmA52dnTh58iRlTLN+/XokJCSYPotZpLa2FsuXL9f3MEwMwRTBmkWamprw1Vdf0SJLALBlyxbEx8fP2PMSBIGGhgaUlpaivLycZnRhaWmJ0NBQhIWFwc/Pb9xiiCRJdHV10QRXW1sb1Gp6oTiHw4G7uzsttdDW1nZenXyVSiXKy8tRoTaHTCodNYJFEATEEjFAkpDJ5fg5FcH6pY4QZjCYlMBiMpmwtLVDY34O7EgFXFxcZr2fkaFz/vx5WvSCxWLh9ddfp1z1JBIJ3n33XSgUCrz88svw9PTU11Cnlb/85S/o7u7G66+/DhcXF30PZ0r09fXh1KlTaGhoAIPBwMqVK7FixYoRv7skSSInJwfvvfcehELhsPu4u7sjKCiIOoZrUBh8E1aAyWSBPcHaWy1qkoSaJNDfLsSpD/4IYWM9nJ2dER4ejtWrV085klhbW4tjx46Bw+HgzTffnBfp2hKJhKqtAoD4+HhERUWZolV6ZHBrBQcHB+zevRvu7u76Hta8QqlU4n//93/xy1/+0mROZmCYIliziJeXF55//nl8/fXXNLe1CxcugM1mIyYmZkaeV5sq5uvri40bN6K5uZmyf+/u7oZAIIBAIICZmRmCg4OpXlujpZ5oo1VOTk6Ijo4GoEkTaGtro6UWdnd3o6GhAQ0NDdRjLS0tdVIL53IqAYfDQWRkJPoauyCSW0IulUEqlVA2/oNhPG0LQBAEaAVWT0XUoA2a/xgMcHk8mPF44HI4sGCzwCbZsLe3R0BAAHx8fOZ1CtFgkpOTUV5eDrFYY2agVqtx4cIFHDhwAAwGA/fu3YNCoUBQUNCcEVdKpRI9PT1gMpkGbc8+XmxtbXHo0CHcuHGDalxaV1eH1NRUHSt9pVKJM2fO4MSJEzQnSS0MBgNBQUFwd3enRJSdnR2e2bgO5ja2KO8SoV0sH7P2VsvgGlw2k4EgB2u4ediiPmohMls0bTMaGxtx48YNuLi4TKl+ys/PD+Hh4SgtLUV2djZ27do16WMZMiRJor6+HgKBAJWVlQgODsbWrVtN0So9M7Q5eGRkJLZs2WIqEdADTU1NcHV1NYkrA8QUwdIDNTU1+Oabb2gmCAwGA6mpqbPa0JQkSbS3t1Nia/AKL4fDQWBgIMLDwxEUFDRpi2SJRIKWlhaac+HQWjRAUxMzOLXQzc1tzqUWlnQMoLFfCi5LI5QIkoBKqYRKpYJSqYTyqfNff38/iKfufz/dugaLVq/Dcz/9NTgcLrSqi8FggMFkaur/GAywWSxYWNug9voFqKQ/vr8sFgsLFixAQEAAAgIC4ObmNq8nJsXFxTh16hRt286dOxEUFERFrw4fPjxtbnX6pq2tDR9//DGcnJzwk5/8RN/DmVZqampw+vRpiEQiWFhYYNeuXVTz7J6eHnz99dfIzs5GX1+fzmO5XC4iIiJga2tLbXN2dsb+/ftp0SCZSo2WARnq+6WQqwkwGQBBkiBJBhjQ/BqZmrUOkADseJoaXGcLLphPf2e9vb34r//6L6o+JSoqCq6urnjxxRentNrf09ODDz74ACqVak59ZwHNdaOgoAACgQBMJpOqrZrLC3HGQltbG06ePImuri5wOBxs2rQJMTEx8/q6ok+uX78OgiCwdu1afQ/FxBBMAktPVFZW4sSJE7SUOiaTiT179gzbp2g26OrqonptNTU1UdtZLBatsfFU7G5JUtOQc3BqYWtrq47jHovF0kkttLe3N+qTeLtYjkfCPnDGSMVrbxdCKpFCJhHh9c2rkbB2A/b//LewsbEGk8kEQZJQq1RQqdVQq1QgAbA4HCilUtw7/il8fX1HjFpZWFjA39+fElzzIbVoMCRJ4uuvv0Z1dTW1zcLCAhEREcjLy0NgYCD27dunxxFOL1pBGRYWhmeeeUbfw5l2RCIRTp8+TdVWLV26FD4+Pjhx4gQePHhAc2/VYmtri4iICNqKr6enJ/bu3TtquplCTWBAoYJIoYJSTYAgARaTATM2E9ZcNqy4bEpUDaW+vh7//u//jtraWrDZbMTHx8PFxQWvvPLKlHoDXb16Fbdv34anpycOHz5s1OfHodGqk
JAQ8Pl8eHt7G/XrmiuQJIm8vDxcvnwZKpUKLi4u2L17t9HWds4V/v73v2PlypVzwvF2rmESWHqkrKwMJ0+epPWxYrFYePbZZ/XeLK6vr48SW/X19VSPJAaDAR8fH4SFhSE0NJS2AjxZ1Go1hEIhLbWws7NTZz9zc3NaaqGnp6dR5d8TJInr9Z2aidkoE4bOzk5knzmJz97W9L5akrIZ+3/+W5ibWwwT1SOhVhNgsNl4dPk8Hl2/DA6HAz8/P1rq00g4OztTYsvHx2depBl0d3fjww8/pES9UqlEQ0MDAgIC5lwk4Nq1a7h16xZWrFiBNWvW6Hs4MwJJkrhz5w6uXbuG+vp6tLa2Qvk0MjwUT09PBAQE0NJt/f398eyzz874d18gEOC//uu/0N3dDSsrK8TGxsLHxweHDh3ScbMcLwqFAu+//z4GBgawc+dOKl3bmBCLxVRtFYvFAp/PR1RUlClaZUBIpVKcO3cOZWVlADT1bykpKab0cz1jqr8ybEwCS88UFxfj9OnTtCavbDYbe/fuhZ+fnx5H9iNisRgVFRUoKytDTU0NLerm6elJORJOZ42HTCajpRY2NTVRtTODsbe3pwkud3f3SU9WZoPaXjGedIvBZoxcPN/d0422xkY01WqiLFZ29nDz9oGzszMUCoWOSQqDyQKDAVRl/wCFVIKenh709PRApVLBw8MDdnZ24xobi8WCt7c3JbjGI9CMlVu3buHatWsANIYB9fX12L59O/7hH/5BzyObXr777juUlZUhNTUVkZGR+h7OjCGXy/G3v/0NJ0+eRENDA0iShKOjIxVtZzKZCA4OhpubG+1xYWFhSE1NnbVzxg8//ICPP/4YUqkUrq6uCA0NRUxMDHbs2DHp31phYSHOnDkDa2trvPnmm0Yx0SJJEnV1dRAIBKiqqkJoaCj4fD68vLzm7DnHWGlsbER6ejr6+vrA4/Gwbds2RERE6HtYJqBJk75x4wZefPFFfQ/FxDCYBJYBUFBQgLNnz9K2cTgc7N+/3+Ca9MlkMlRWVqKsrAyVlZU0sw5t4XZYWBhcXFym9UJJkiT6+vpoqYUtLS205wc0Eyk3NzdaaqGjo6PBXLQJksS9pm6IleoRUwX7+nohFv9YR6Wt0XJzc4WlpSVkMhn6+/spkwwWl4vWRznob67XOZZUKoW9vT1cXFwgFAp1xNloaNMJtSmF0xGtNBTUajU+/vhjtLS0IDc3F2q1GmvXrsVvf/tbgxboE+WDDz5AR0cHXn31VR1xMVfo6urCiRMnUF1djYKCArS1tVF1ntbW1nBzc0NUVJSOCUZsbCy2bt06q+6ZBEHgo48+wqlTp0AQBGWosm7dOixdunRSxyRJEp9//jmam5uxfPlyg67FEIvFVG2VNlUyMjLSFK0yQEiSxN27d3Ht2jUQBAFPT0+kpaXB3t5e30Mz8ZSrV6+CwWDM2ewEY8cksAyEvLw8XLx4kbaNx+PhwIEDButoplQqUV1djdLSUjx58oTW48vBwYGKbHl6es6IwCEIAu3t7bTUwo6ODgz9SpuZmcHDw4OWXjiVuoepMiBXIaelGwySARZT930ZEA1goH+Auq1WqyGXy+Di4krVTJEgIRKJIJUrMNDWjOa8W6M+J5vNRlJSEvz8/NDQ0IDq6mo0NzfT0lPHwsnJiYpu+fr6GsVK+WjU19fjX//1X1FfXw97e3tER0djzZo1c6avkFqtxn/913+BJEn80z/905xM56moqMDp06fR1taGx48fQ61WgyRJDAwMoKenBzweD35+foiMjKSlEy9ZsgTr1q3Ty8KLTCbD7373O+Tk5IDBYCAmJgZ2dnZ47rnnEBwcPKljNjU14fPPPwebzcYbb7xhUJNgkiRRW1sLgUCA6upqhIaGIj4+fsauCyamjkgkwpkzZ6ha1SVLlmDt2rVzznjK2Pnb3/6G1atXw9/fX99DMTEMJoFlQOTk5CArK4u2zczMDAcPHjT43hJqtZrW2HhwOp+NjQ3Va8vHx2dGV4zlcjlaW1tproWD+35psbW11UktnE3B0CVRQNDWCwBgD3k/JBIxent/dD4jSAJSiQTOzs6ws9NMnEiShJIgYM1mQPjoHooKCsb1vLa2tli3bh0iIiIgl8tRV1eH6upqVFdXD2tlPRIsFgteXl60dEJj66MllUpx5MgRNDQ0IDY2Fra2tmCz2XjttdfmhKV5R0cHPvjgA9jb2+OnP/2pvoczrZAkiRs3buDmzZvo7OxEaWmpzmKBo6MjxGIxZDIZWCwWgoOD4erqirVr12LZsmV6ndx3dXXhV7/6FWpqasDlcsHn82FjY4OXXnpp0r3KTp8+jaKiIoSHh2PPnj3TPOKJIxKJUFBQgPz8fHA4HKq2arKOtCZmh6HunFqXVROGhUKhwP/93//hV7/61ZxcPJsLmASWgXH79m1cvXqVts3CwgKHDh0ymiahBEGgsbGRsn8fbJNsYWGBkJAQhIWFwd/ff1bSsfr7+2mphc3NzVAoFLR9mEwmXFxcaKmFTk5OMyoaemRKFAj7IFcT4AyqyZLJZeju+lHskCAhFong6OQERwdHqAgSJEh4WJkh3MkaLCYDTU1NyMjIQHNz87ie28fHBxs2bKAJ956eHlRXV6OmpgY1NTW0iORYmJub09IJx1v3pU9u3LiBK1euUKvqWvz9/bF//36jX10vLS3F999/j+DgYDz//PP6Hs60IZPJcPr0aTx58gRtbW2oqKigRa1ZLBZCQkLg4uIClUqFiooKdHR0ANA0dX/rrbcMIvpaWVmJX//61+jp6YGNjQ1iYmLg6OiIl19+eVLmPf39/Xj//fehVCpx8OBBvdTwkiSJmpoaCAQC1NTUICwsDHw+3xStMgIIgqD6y5EkCV9fX+zatWveOc0aC9XV1bh16xZeeOEFfQ/FxAiYBJYBonX+GoyVlRVeeOEFo1tZJ0kSra2tKCsrQ2lpKbq6uqj7eDwerbHxbE16CIJAZ2cnLbWwvb1dZwWcy+XqpBZO98VGRRCo6BKjaUAKAGCCAbVKOcRFkYRIIoG9oyPsbO3AZTER6WwNJwt6U0eSJFFYWIjs7Oxh7amHwmAwEBcXhzVr1uhY7xMEgZaWFiq61dTUNKF0QkdHR1o6oaE1oJTJZHj33Xchk8mQmJiI3Nxc2v27du1CVFSUnkY3Pdy8eRPXr1/H0qVLsW7dOn0PZ1pob2+nGgc3NTWhqqqKdr+5uTkWLlxI+z6TJAmhUAgOhwNHR0c4Oztj9+7dBrFgdePGDfz3f/835HI5PDw8EBwcDF9fX+zfv39S6Vha8xZXV1ccOXJk1qLK2miVQCAAj8cDn89HZGSkKVplJPT19eHUqVNoaGgAg8HAypUrsWLFCqPLSphPZGdng8ViYfXq1foeiokRMAksA4QkSVy5cgX37t2jbbexscELL7xgUPn1E4EkSXR0dFCRrba2Nuo+NpuNwMBAhIWFITg4eNaLnhUKBdra2miphb29vTr7WVtb01ILPTw8pkU8aBuaNg/IMKBQorenW9tTGAwGA2KRCJCJsX1lEpzMuaOuBsvlcty6
dYsybxgLMzMzrFq1CosWLRpxUjc0nXCwUB4LJpMJb29vKrrl4eGh9wv3jRs3cOPGDfj5+eHAgQP46quvqF5KAGBpaYmf/OQnRl18n56ejsePH2PHjh2IiYnR93CmTElJCX744Qfqu1hfTzd1cXR0RFhYmE5UnMPhYM+ePbCxscHJkyfR2dkJNpuNjRs3Ii4uTq+RFZIkcezYMRw7dgwkSSIkJATu7u6Ij4/Hli1bJnw8pVKJDz74AL29vdiyZQvi4+NnYNQahkarwsPDwefz4eHhYYpWGREVFRU4e/YspFIprK2tkZqaCl9fX30Py8QYfP7551i7dq3BuE2b0MUksAwUkiSRkZGBBw8e0Lbb2dnhhRdemBOObj09PZTYamxspLYzmUz4+flRvbb0ZUghEol0UguHps0xGAw4OzvTUgtdXFymJCAUKhX++Je/gsligyRJqORS5N6+hZCQELzzzjvjXtnu6urC5cuXUVFRMa79nZ2dsWHDhnE1LOzt7aXEVm1tLaRS6bieA9AIOq3Y8vf3n/UFg8HRq0OHDsHX1xddXV346KOPaL2T+Hw+tm7dOqtjm04++ugjCIVCvPzyywZrlDMeCILA1atXcffuXZAkiaqqKp1UWF9fX/j4+OhM7M3MzPD8889TbqwKhQIZGRl49OgRAGDhwoXYunWrXiOsarUav//973Hjxg0wmUzExsbC2toamzdvxqJFiyZ8PG1qqIWFBd58881pXyQQiUR49OgR8vPzwePxKCdAQ4tSmxgdlUqFK1eu4P79+wCAoKAg7NixQyebwYThIZfL8cc//hG//vWv55Tr7VzDJLAMGJIkcf78eeTn59O2Ozo64tChQzq2w8bMwMAA1di4rq6OSkdjMBjw9vamHAn1WdtDkiS6urpoqYVCoVAnSsThcODu7k5LLbS1tZ3Qqu4777xDWU0Dml433t7e+M///M8JpylWVVUhMzNz2ObNwxESEoKUlBQ4ODiMa3+CINDa2koJrsbGxgmlEzo4ONDSCWc6rUibOufr64tDhw7pbB/Miy++aHCtEsYDQRB4++23oVKp8Nvf/tZoJ78SiQTp6emoqakBQRCoqKiAUCik7mexWAgLC4OTk5POYy0tLbF///5h7emLiopw4cIFKBQKODg4IC0tDR4eHjP6WkZDIpHgrbfeQlVVFZViZ2Zmhn379k3YIUwbFaurq0NiYiI2bNgw5fGRJInq6moIBALU1tYiPDwc8fHxc7pX3lymq6sL6enpaG1tBZPJRHJyMpKSkkyfpZFQWVmJu3fv0q5fJgwPk8AycAiCwJkzZ1BcXEzb7uzsjEOHDs3J1SaJRIInT56gtLQU1dXVNAHj4eFBia3hJlWzjUql0kktHM6Nz9LSkpZa6OnpOaqQ+PDDD9He3k7dLi0thYODA/71X/91UhNBtVqNvLw83LhxY1zmFSwWC4mJiVixYsWEJ+dyuRz19fWU4BqvsAM00UsvLy8qwuXp6Tmt6YRyuRzvvvsupFIpFb3SolKp8PHHH9PG6+zsjFdffdXo7Im7urrw/vvvw9bW1mibJ7e2tuK7775Db28v1Gq1Tg2nhYUFFi5cOKwhhJ2dHfbv3z9qzWpXVxdOnjyJtrY2sFgsrFu3DosXL9bbJLOlpQVvvPEGenp6YGdnh+joaFhYWODw4cMTrr1ta2vDJ598AgaDgddff33S58qBgQEqWmVubk7VVhmrYDcBFBcX4/z581AoFLC3t0daWppRR7jnI1euXAGHw8GqVav0PRQTo2ASWEYAQRBIT09HaWkpbbubmxsOHjxo1HUiYyGXy2mNjQe7/zk7O1Niy83NzWBW3yQSCS2tsKmpadgUOicnJ1pqoaurKzWR//LLL2k1QdqV7d/85jeT7pUDaBp9Xrt2Dfn5+Tr9wobDysoKycnJiI6OnvT729fXR4mtmpqaCacT+vn5URGuqaYTak0AfHx8hnVfqqurw9GjR2nb1q5di+XLl0/peWeb8vJynDhxAoGBgdi3b5++hzNhCgsLcf78eahUKqhUKhQXF9PcSJ2dnRESEjJseoyzszP2798/rkivSqXC5cuXqVTskJAQ7NixQ2/nVIFAgN/+9rdQKBTw9vZGQEAAnJyccPjw4QlHds+fPw+BQICgoCDs3bt33I8jCIKKVtXV1SEiIoKqrTJhvAxNj42IiMDWrVtNRiRGyGeffYb169fDx8dH30MxMQomgWUkqNVqfP/99zr1NJ6enjhw4MC8WFFUKpWoqalBWVkZKioqaBN1Ozs7Smx5e3sbjNgCNOk1PT09tNTCtrY2Wr0PoDH6cHNzg6enJ8rLy9HW1gYzMzMwGAzU19dDrVbjZz/7GeLi4qY8ptbWVmRkZKChoWFc+3t6emLjxo3w8vKa0vMSBIG2tjZaOuF4jDi02NvbU2LLz89vQpODwdGr0Wysf/jhB2oSAmg+l9dff33cKZOGwJ07d5CdnY2kpCSkpKToezjjRq1WIysrixI8CoUCRUVFNFdMf3//EX/jnp6e2Lt374RtzsvKyvDDDz9AJpPB1tYWqampeksNTU9PxwcffACSJBEeHg4XFxcEBgbi+eefn1A0VywW4/3334dMJsPevXvH7GU0OFplYWEBPp+PhQsXzotry1xHKBQiPT0dHR0dBmPwYmJymOqvjAeTwDIiVCoVvv32W6q7upYFCxZg3759BtHbZbZQq9Wor6+nTDIGT8CsrKwQGhqK8PBw+Pj4GGR6l1qthlAopKUWDk5N04oPDocDa2trKBQKkCSJn/70p1i/fv20jIEkSZSUlODKlSu06MBoREdHIzk5edrq/xQKBerq6lBTU4Pq6mqqX9F4YDAYVLNjf39/eHl5jToB1faYW7BgAV544YURJxcSiQR//etfaTVwAQEB2Ldvn9FMSM6cOYPCo8fIZQAA0GlJREFUwkJs27ZtWgT5bCASifD9999Tol8mk6GwsJBaSGGz2QgPDx9R6Pr5+eHZZ5+dtCDo7e1Feno6mpqawGQysXr1ar00JCZJEn/4wx+QmZkJFouFuLg4WFpaTkosa5vXOzo64vXXX9c5Fw6NVi1cuBB8Pt/gG9ubGB8kSUIgECAzMxMqlQrOzs5IS0uDq6urvodmYpI8efIEOTk5OHjwoL6HYmIMTALLyFAqlTh+/Djq6upo2/38/PD888/Py47eJEmiqakJpaWlKCsro9mrm5ub0xobG/L7I5PJKLF1/fp13LlzB0qlEoBm0j8wMIC4uDgsXryYllro5uY2pZUspVKJO3fu4O7duzpRteHgcrlYsWIFEhMTp30Frb+/n5ZOOFjkjAWPx9NJJ9ROjgdHrw4cODCmcUBBQQHOnj1L25aWloaFCxdO+DXpg08//RQtLS1GY9LR2NiI77//HgMDAwA03/fCwkLI5XIAmkWTiIiIEVP3QkNDkZaWNuXvo1qtxrVr13D37l0AGmG9c+fOWXcyVSqVeOutt1BWVkbVPrHZbGzfvh2xsbHjPo5arcaHH36Irq4upKSkICkpCYDmd6aNVllZWVHRqvm0SDfXkclkOH/+PEp
KSgAAsbGx2Lhxo+kzNnIuX74MHo+HlStX6nsoJsbAJLCMELlcjq+//ppmbQ5obFafeeaZeR02JkkSbW1tVGRrcESEy+UiKCgIYWFhCAoKMujUl6KiIpw6dQpyuRz9/f1oa2tDTU0NQkNDERoaStuXxWLB1dWV5lro6Og44ZX33t5eXLlyhbogj4WDgwPWr1+PkJCQGVnl136WWsHV0NAw4XRCrVlGa2srbt++PWb0avBza53YtFhZWeGNN94w+JpHkiTx9ttvQ6lU4je/+Y1Bj1e7wp6RkUF9tgMDAygqKqIWF1xdXREcHDxiJDomJgbbtm2bVjOUyspKnDlzBhKJBFZWVti1a9eE3fymSnd3N1555RV0dnbC0dERCxcuBJvNxsGDByckmisrK3H8+HHweDxs3LgRZWVlaGhowMKFCxEXF2eKVs1BmpubkZ6ejp6eHnC5XGzduhWRkZH6HpaJaeDTTz/Fhg0bjGLhbL5jElhGikwmw7Fjx9Da2krbHhoait27dxtkWpw+6OzspMRWS0sLtZ3NZsPf3x9hYWEICQmZcM3GTFNTU4Mvv/ySui2RSFBcXIzU1FRs2bKFllrY0dGhY1hhZmZGcyz09PQc9yp8XV0dMjMzaY2gRyMgIAAbNmyAs7Pz+F/gJFAoFJQ7YU1NDc1lcTTUajVyc3Nhbm6OPXv2YPny5fDy8hrzN9LZ2YmPPvqIJuom2wB2Nunt7cW7774LKysr/PKXv9T3cEZEpVLh0qVLtDYUvb29KC4uhlqtBoPBoJwkRxLEiYmJSElJmRGB39/fj9OnT6Ourg4MBgPLly/HqlWrZrVJdmlpKf7hH/4Bcrkcvr6+8PX1haWlJV5++eVxt6zo6+vDO++8g4cPHyIoKAgvvviiKVo1RyFJEjk5OcjOzgZBEHB3d0daWtqEXShNGCYymQx/+tOfTPVXRoJJYBkxEokEx44do/WFATTNM3ft2jWrEwFjoLe3l+q11dDQQIkSJpMJHx8fhIeHIzQ01CD6i7W3t+PDDz+kbqtUKuTk5GD79u146623aPvK5XK0tLTQXAu1qVaDsbOzo6UWuru7j5gySRAE8vPzce3atXGl6TGZTCQkJGDlypWzFjHp7++nardqamogFouH3a+hoQE1NTWwsbFBbGwsGAwGeDwefH19qXRCBweHYSfpN27cwI0bN2jbXnrpJXh7e8/ES5oWtBELPz8/g83T7+vrw/fff09rGNzZ2YnS0lIQBAEOh4OIiIhRRcSaNWuwfPnyGa2RIggCt27dws2bN0GSJHx8fJCamjrhXnRTISMjA3/4wx8AAJGRkXB0dISbmxtefPHFEUUSQRCorKyEQCBAY2MjvL29UVBQAEtLSxw5cmTY3mAmjBuJRIIzZ86gsrISALB48WKsW7fONBGfQ1RUVOD+/fs4cOCAvodiYhyYBJaRIxKJcPToUZ1eQzExMdi+fbvRFOXPNiKRiBJbtbW1tMa4gxsbT9UWfLJIJBK888471G2SJHHr1i2sWbMG//Iv/zLm4/v7+2mCq6WlhWZxD2hEkYuLCy210MnJiSbMpVIpbty4gby8vHE1D7awsMCaNWsQFxc3qwJfm06oFVxa10Vt9EqpVCIqKmpEgwQ7OzvKLMPf358SiSqVCh999BGt/5KLiwuOHDlisFHie/fu4fLly0hISMCmTZv0PRwd6urqcPLkSZogbmtrQ0VFBUiShI2NDSIiIkZN4d20aRMSEhJmY7gAgNraWpw+fRoDAwOwsLDAjh07ptQuYaK89957OHPmDNhsNuLi4mBhYYGwsDDs2bOHdo7v6+ujaqtsbGzA5/MREREBLpeLzMxM5ObmwtfXFwcPHjRdG+YQdXV1OHXqFAYGBmBubo4dO3YgJCRE38MyMc1kZWXBwsLC6NqGzFdMAmsO0N/fjy+++AI9PT207fHx8di8ebPpQjoGUqkUT548QVlZGaqqqmhGD25ubpTYcnZ2nrX3kiRJ/Od//ictPe3evXvg8/n493//9wnXjxEEgc7OTppVfHt7u45o4nK5OqmFNjY2aG9vR2ZmJq0312i4ublh48aNeuvToVQqUV9fj3PnzuHq1atgsVhU9GosGAwGPDw8qOiWUqnE119/TdsnOTkZy5Ytm6nhTwmtzfzmzZuxaNEifQ+HgiRJ5Obm4sqVK7TvXVNTE6qqqgAA7u7uCAoKGlGcM5lM7NixA1FRUbMy5sGIxWKcOXOGGmtSUhKSk5NnRWgTBIF/+Id/QGFhISwtLREXFwcWi4WVK1di5cqVtGhVZGQk4uLidKJUUqkU77//PiQSCfbs2YPw8PAZH7eJmWVohHXBggVITU2Fra2tvodmYgb45JNPsGnTJoPOoDDxIyaBNUfo7e3FF198oWO3PZM1CnMRhUKBqqoqlJWV4cmTJ5SLGQA4OjpSYsvDw2PG39M///nPtM8zLy8PoaGh+Kd/+qdpyalXKBRobW2lNUUe7MCoxcbGBp6envDw8IBcLkdhYeGwKYjDERERgXXr1o27XmQ6USgUeO+99yAWi7Fjxw4wGAzKMGOkdMLh4HK56OjowMDAABwcHGBubg4ul4vXX39dbxHO0fj888/R1NSEQ4cOwdfXV9/DAaARvOfOnUNxcTG1jSRJ1NfXUzVOQUFBozazZbPZ2LNnz6xGjoZCkiTu3buHq1evgiAIeHp6Ii0tbVa+BwMDA3j55ZfR1tYGZ2dn+Pv7o62tDS4uLggNDQWfz0d4ePiotVUPHz7EhQsXYGdnhzfeeMOgXVVNjM7AwABOnTql1xpBE7OHVCrFu+++i1//+tcGmz1hgo5JYM0huru78cUXX+hMfpcvX441a9aYRNYEUalUqK2tRVlZGcrLy2m1SLa2trTGxjNxUfvss89oNSqFhYXw9vbGL37xixmLDIlEIp3UQplMRtuHJEkMDAygvb0dFhYWsLGxgaWl5YjfLzabjWXLlmHp0qWzOqHTpsp5enri8OHD1PhIkoRQKKSlE45lT69QKPDgwQOoVCrweDw4ODggOjoab775JiwtLWfj5YwLbQ8lmUyGX/3qVwYxtp6eHpw4cYJWK0qSJKqqqtDc3Awej4eIiIhR65p4PB6ef/55vUVEh9LY2Ij09HT09fWBx+Nh27ZtiIiImPHnraysxIsvvojOzk5YWloiJCQE3t7eeOutt0YVp1oIgsAnn3wCoVCINWvWYMWKFTM+ZhPTjyG4XJqYXcrLy5GXl4f9+/freygmxolJYM0xOjo6cPToUZ0V+tWrV5v6JkwBgiBojY0Hi1hLS0uEhoYiLCwMfn5+07a69O2336KiooK6XVpaCkdHR/zkJz+ZlckcoJkId3V10VIL29raQBAE5HI5ampqIBQKwWQyYW1tDRsbG+r/PB6PJrpsbW2xfv16hIeHz7jYVyqVePfddyEWi7F3714EBQWNum9DQwMV3RpqGqOltbWV9nkAmghddHQ0lU7o7e2t19XFgYEB/PGPf4SFhQV+9atf6X1RpaqqCqdOnaKaBQOa31JFRQWEQiFsbW2pGqGRsLS0xL59+wzOTlwqle
LcuXMoKysDoEnJTklJmZFFhN7eXuTn5+PRo0dobW3FzZs3YWFhgZiYGNjb28Pa2hqvvPLKuAx6amtrcezYMXA4HLz55puzathhYmqo1WpcvXoV9+7dA6C/Pm0mZp/MzExYWVkZbGq6CV1MAmsOIhQKcfToUdqkBgDWr1+PJUuW6GlUcweSJNHc3EyJre7ubuo+MzMzBAcHIywsDIGBgVOabJ0/fx4CgYC6XVlZCTMzM7zyyitYvHjxlF7DVFCpVLTUwpKSEuTl5elETrlcro7oYrPZ8PHxwcaNG2fUySwnJwdZWVnw8PDAyy+/PCGhIRKJqOhWdXU1RCIRAM3nXlBQQEvb5HK5SEhIoJy6uFwufHx8KMHl5OQ0qyJHa++/YMECvPjii7P2vEMhSRJ37tzBtWvXaC0ECIJASUkJurq64OnpiYCAgFGjv7a2tjhw4IDB2kyTJIm8vDxkZWVBrVbD1dUVu3fvhpOT05SPrVar8eTJEwgEAjQ3NyMqKgp8Ph8uLi745JNP8O2334LD4YDP51NtGQ4dOjSuc853332HsrIyREdHY+fOnVMeq4mZp6enB+np6WhubgaTycSaNWuwdOlSvS+imJgdPvroI2zduhVeXl76HoqJcWISWHOUlpYWHDt2jFZDBMy++9ZchyRJtLe3U2JrcPSDw+EgMDAQYWFhCA4OhpmZ2YSOff36ddy8eZO6rXXGO3jwINauXTttr2E6EIvFyM7ORmZmJoRCIfr7+4dNu7OwsKDE1rJly7Bz585pX0FXKpV47733IBKJ8Pzzz0+pZkf7+Wqt4EtKSpCbm0sTDR4eHiM+h42NDSW2/Pz8Zjxl7/79+8jIyACfz8fWrVtn9LlGQi6X4+zZs1RkR4tKpcLjx4/R39+PkJAQuLq6jnocJycn7N+/3ygK9ltbW3Hy5El0d3eDw+Fg8+bNiImJmdSxBker7O3tqdqqwcKJIAj84z/+Ix48eABra2vExsaCyWQiKioKO3fuHHPS3dPTgw8++AAqlQqHDx82TdoMnJKSEpw7dw5yuRy2trZIS0szGR3MIyQSCd577z1T/ZWRYRJYc5jGxkZ89dVXOvbc27ZtQ1xcnJ5GNbfp6uqixNbg+ikWi0VrbDyeiXZeXh4uXrxI3W5pacHAwACeffZZbN++fUbGP1Xkcjlu3bqFnJwciMVi9Pf3o7+/HwMDAxgYGNBpiMzlchEfH4+lS5diwYIF8PT0hL29/ZRWZXNzc5GZmTmp6NVYqFQqnDhxAllZWejp6aGiW3FxceMSiu7u7rR0wunuUXPhwgU8fPgQGzZsQGJi4rQeezx0dnbixIkTOm0jFAoFiouLoVQqERERMWYqm4eHB/bu3WsQNWTjRS6X4+LFiygqKgIAREdHY/PmzeNq6Ds4WtXS0kJFq0Zr3i0SifDaa6+hsbERbm5uCAkJAYPBGLfD5dWrV3H79m2dGkUThoNSqURWVhYePnwIAAgLC8O2bdtmrdegCcOgrKwM+fn52Lt3r76HYmICmATWHKeurg5ff/01LZrAYDCwc+dOvVgdzyf6+vqoXlv19fWUuGAwGPDx8UFYWBhCQ0NHXKEvKyvDd999R93u6OiAUCjEzp07Df5E29XVhaysLDx58oTaRhAERCIRBgYGKNGlNQ6xsLBAYGAgHBwcYGFhoWMVb2FhMa7nHRy9eu6552akF4xSqcRHH32E7u5uKBQK9PT0gCAIBAYGTsidkMPhwNfXF/7+/ggICJiWNgBffPEF6uvrsX//fgQEBEzpWBOlvLwcZ86c0Ymay2QyFBUVgcfj6URihsPX1xfPPffchFsRGALaNNJLly5BqVTCyckJaWlpI6bD9vT0UNEqR0dH8Pl8hIWFjTu1uLa2Fm+99RYGBgYQHBxMuZs+++yzY373FQoF3n//fQwMDGDnzp2Ijo6e8Os1MXN0dHQgPT0dQqEQLBYLKSkpWLRokUkIz0MyMjJgY2ODpUuX6nsoJiaASWDNA6qrq/HNN9/QeioxGAzs3r3b1AtllhCLxaioqEBZWRlqampon4WnpyflSDi41qSxsRF/+9vfqNt9fX2orq7G5s2bceTIkVkd/2SpqqpCZmamTkRDi1KppKJb/f394PF48Pb21lmhdXBwoMSWl5cX3Nzcho3+aFPk3N3d8corr8zYZERb6zSYdevWISgoiEonrKurg1KpHPcxra2tqeiWv7//pKI377zzDiQSCX7+85/PmnkBQRC4ceMGbt26pXOfRCJBYWEhXF1d4efnN+bnERISgrS0NKO3D+/o6MDJkyfR3t4ONpuNlJQUxMfHg8FgQK1Wo6KiAgKBAK2trYiOjkZcXNyo0arRuH37Nn73u99BrVYjJiYGtra24HK5eOmll8ZMwywsLMSZM2dgbW2NN998c1zRNhMzy1CR7ujoiLS0NIMzeTExe3z44YfYvn07PD099T0UExPAJLDmCRUVFfjuu+9oDT6ZTCaeeeYZU8f3WUYmk6GyshJlZWWorKykTcJdXFwQHh5OrWL/5S9/oe6TSCQoLi5GcnIyfvGLX+hj6JNCrVbjwYMHuHHjhk50YygkSUKpVMLf3x9ubm5ob29Ha2urjlBhsVhwdXWFl5cXJbxsbW3xl7/8hUqjDA0NncmXhdOnT1PpYIAmIvXGG29QPb9UKhUaGxsps4zW1tYJHd/NzY0SXAsWLBgznVAsFuN///d/wePx8I//+I+zstItlUpx+vRpVFZW6tw3MDCA0tJS+Pv7j0s8REdHY9u2bXOmxkCpVCIzM5MyqlmwYAHc3NwoN9D4+HiEhYVNS5roF198gS+//BIcDgfx8fHgcrmws7PDyy+/PKpQJ0kSn3/+OZqbm7F8+XKDq+2cb8jlcly4cIHqFxcdHY1NmzYZZTTXxPQgFovx/vvv49e//rWpx5mRYRJY84iSkhKkp6fT6mBYLBaef/75WU8nMqFBqVTSGhsP7jlla2uL/Px8ODs7w9raGiqVCvfv38eKFSvw//7f/zO6k61YLMa1a9eQn5+vU4s1HNbW1khOTkZERAQ6OjpoDZE7Ojp0jtHR0YGWlhb4+Pjg1Vdfhbe394zW8IjFYvz1r3+luXUGBwfjueeeG1bciMVi1NbWUoKrv79/3M/F4XAod0J/f3+4uLjoPEddXR2OHj0KLy8vHD58ePIvbJwIhUJ89913NBdNLb29vaiurkZoaOi4PoPFixdjw4YNcy79Sa1W4/z58zh+/Dh6e3sREBCAN954A5GRkdP+PL/73e9w69Yt2NraIjo6GkwmEz4+Pjhw4MCoorWpqQmff/452Gw23njjDYNsnj0fmE6jFBNzh9LSUhQUFOD555/X91BMTBCTwJpnFBUV4cyZM7TJKYfDwd69e+Hr66u/gZmAWq2mNTYWi8W4c+cO1dzW0dERNTU1WLduHX7zm98Ybe+T1tZWZGRkoKGhYVz7e3l5YcOGDTSnM7lcjpaWFqo/V2NjI65cuQKFQoGFCxdSNtl2dna01EJ3d/dpTT/Lz8/HuXPnaNv27NkzZuotSZLo7OykxNZk0gm1tVv+/v6ws
rKiTFFiY2Nn3ATl8ePH+OGHH4Ydc1dXF9ra2hASEjKu6MyqVauwcuXKOSWuuru7IRAIUFBQAGdnZwQGBqKoqAjt7e1gMplITk5GUlLStL5mkUiEn/70p6iuroanpyfV+y0uLg5bt24d9bm00djw8HDs2bNn2sZkYmxIksSDBw9w+fLlabf6N2H8XLx4Efb29qYWO0aISWDNQ4abFHK5XOzfv99k/WogEASBxsZG/OEPf0B1dTWVWtfY2IgFCxbg4MGDWLJkCfz9/afdiW42IEkSJSUluHz58rgjOTExMVi7du2wDnQPHjzAmTNnwOVysXTpUrS0tKClpUXHQZPJZMLFxYWWWujk5DTpaCBJkvjiiy9oYtHa2ho/+clPJpTWo1Kp0NTUREsnnMip2dXVFd3d3RAKhUhLS8OKFSsm9DrGC0EQyM7OphqdDkUoFEImk2HBggXjEg8bN27Ua0+36UStVqO8vBwCgQBCoZCqrdJOlFUqFbKzs5GbmwsACAoKws6dO8dt4DIe6urq8POf/xzd3d0ICwujarDGep/7+/vx/vvvQ6lU4uDBg/Dz85u2MZkYGalUih9++AHl5eUAgEWLFmH9+vVGX4NoYvr44IMPsHPnTnh4eOh7KCYmiElgzVMePHiAS5cu0bbxeDwcPHjQ9EM2II4ePYra2lqIRCJ0dHRAIBDA1tYW8fHxcHBwAI/HozU2NrYidYVCgbt37+Lu3bvD9s0aCpfLxYoVK5CYmEgJS5VKhb/85S/o7+/HM888g7CwMAAaMTA0tVAoFOoIFx6PBw8PD1qkaywb8cG0t7fj448/ptU3Ll68GBs3bhz3MYYikUhQU1NDNTwe3Nx4JAoLC/8/e+8dH9V55f+/7zSNeu8V0XvvvYpuqgFjsIlbXJJsssnu/pLNtnzX2ezG2U3cYlwAg216770bAwJRhCgS6r2Xkabf3x9CNxokQIDESOJ5v15+mXnunZkzo9Ho+dxzzudQWlpK//79GTBggJLdCg4ObpZMicFgYPPmzaSmpjZ6vLCwELVajZ+f3yMfS6VS8cILL7QL57ri4mIuXbpEQkICQUFBDBw4kG7duj3wwsfNmzfZsWMHNTU1eHl5MX/+fKKjo5stntOnT/Of//mfmEwm+vfvj6enJ5Ik8fLLLz+0FPzkyZMcPXqU4OBg3nrrrTZXgtzWyMjIYMuWLZSXl6PX65k9e7YwnRI4UFVVxUcffST6r9ooQmA9x5w5c4ZDhw45rLm6uvLqq68+0n1K8GzYvHkz169fV25fvnyZwMBAhg0bhtVqJS8vTzmm0WgcBhu3pVkpZWVlHDp0iMTExCad7+fnR1xcHF26dOHixYvs2bOH4OBgfvzjHz9UTJjNZnJzc5XSwuzs7EbFi5eXV4PSwodlpOpmCtUhSRKvv/56s7g+ybJMcXGxQznh/Zk5gLNnz2I2mxk6dKjDz97Dw8OhnPBxxGMdOTk5bNiwodH3SpZlKioq0Gq1TcrGaDQaFi5c2KbNdaxWq5KtKigooF+/fgwYMMDBBfRhlJeXs3nzZjIzM5EkiXHjxjF69Ohm2UTJsszatWtZs2YNWq2WgQMHotVq0ev1vP766w8sPbNYLHz88ceUlZUxc+ZMBg0a9NSxCBoiyzKnT5/m2LFj2O12IiIiWLBggWKOIxDUkZiYyNWrV1myZImzQxE8AUJgPeecOHGCY8eOOay5u7uzYsUKUQPeCti/f79SUgQoDmRLly5l1KhRlJaWKoONMzMzlfNUKhUdOnRQZm21lX6ttLQ09u3bR35+fpPOj4mJISMjA7vd3qTep8aoqqpyEFzZ2dkN3A4lSSIwMNChtDAoKEjZEFssFj755BNKS0uV+4SGhvLGG280+5VHm83mUE5YVwp55swZVCoVo0ePfqjIDA4OVgRXdHT0I8uREhIS2L17d6MZRlmWsVqtqFSqJrn/ubi4sGTJkjbb71lcXEx8fDxXrlwhKCiIQYMGNbnX7H5sNhvHjx/n9OnTyLJMhw4dmDdv3hMJ4PuxWq38/ve/58iRI/j6+tKnTx8kScLf35/XX3/9gRdfbty4wcaNG3Fzc+MnP/lJm7pI0xaoqqpi69at3L17F4CRI0cyYcKEduOcKWhedu/ejb+/P8OHD3d2KIInQAis5xxZljly5AinT592WPf09GTFihVNKvcRtBynT5/m8OHDyu07d+7g6urKggULmDp1qsO5lZWVymDjtLQ0pWRNkiQiIyOVWVut/Uqp3W4nPj6eo0ePOjj0NUZOTg537tyhR48e/PGPf2yWDWFdxqi+4MrLy3MoAYRac5j6pYVms5nt27c7iJu4uLgW/+NYXV3N2bNn+fLLL7FarfTs2bPJ99VoNERFRSl28PXLCW02G/v37+fChQuN3leWZfR6PUajsUkliG5ubrz88sttrgTZarWSlJREfHw8hYWFj52tehQpKSls3boVg8GAu7s7c+fOpVOnTk/9uOXl5fz6178mMTGRqKgoYmNjAejYsSNLly5tVPjLssyaNWtIS0tj2LBhDb5jBE9OS/2cBe2Xjz76iPnz54sZaG0UIbAEyLLMgQMHHDIlUGsTvmLFila/IW/PJCQksH37duV2Wloasiwza9YsFixY8MD7VVdXK4ONU1JSHAYbh4WFKWKrNWcpa2pqOH78OBcuXGggbqBWiP3www+YTCZ69OhBTEwMEydOpH///s2eNbJYLOTl5SmCKysryyFbVcfdu3cxGAx4eXnh5eWFn58ff/d3f4e3t3ezxnM/dcY1vXv3Zty4cUp2KzU1tdFywgfh7u6uzCBLSEigsLCw0fNcXFxwc3Nr9D1oDC8vL5YvX96qP2/3U1RUpGSrQkJClN6qlsg2VFVVsWXLFqW/bdSoUYwfP/6pn+vu3bv85je/ITc3l549eyrzyB4mnvLy8vjss8+QJIl33nmnTf3MWiMtmakUtF8qKyv55JNP+Id/+Id25bD6PCEElgCoFVl79uzh4sWLDut+fn68+uqreHl5OSmy55vk5GTWrVun3M7Ozqaqqoq4uDheffXVJj2GyWRyGGxcf8MdGBioiK2QkJBW+UVeUFDA/v37lbKaOnJycrh9+zbu7u4MGjRIiT0kJIRp06Y1q3FAYxgMBnJychwyXeXl5Zw/f96hnK5Dhw688MILSqYrODi42TfpBw4c4Pvvv2fixImMHj1aWa8rJ6wzy8jOzn6kO2F5eTmJiYmYzWbc3d3x9fXF19cXHx8f1Go1AQEBqNXqJpdx+vv7s3z58hYXmc1B/WxVUVGRkq16Fpl8u92u9ObIskxkZCTz589/6gtcJ0+e5I9//CMGg4EBAwYoc8lmz57NgAEDGr3Prl27iI+Pp3PnzixduvSpnv95pqysjC1btrRIr52gfXPt2jUSExNZvHixs0MRPCFCYAkUZFlmx44dJCQkOKwHBASwYsWKFh3aKmicvLw8/vrXvyq3CwsLyc/PZ9y4cbz33nuP/XgWi4W7d++SlJTErVu3HErwfHx8FLEVGRnZqsSWLMvcunWLAwcOUFpait1u5/z58xiNRnr06EFQ
<base64-encoded PNG image data omitted>', 'initialized': True, 'feedback_text_es': '', 'semantic_clear_chat_sebastian.marroquin@aideatext.ai': False, 'discourse_clear_chat_sebastian.marroquin@aideatext.ai': False, 'key_concepts': [('análisis', 12.0), ('estudiante', 12.0), ('texto', 11.0), ('oración', 7.0), ('redacción', 6.0), ('funcionalidad', 6.0), ('aprendizaje', 6.0), ('palabra', 6.0), (']', 6.0), ('herramienta', 5.0)], 'logged_in': True, 'feedback_email_es': '', 'morphosyntax_chat_history': [], 'feedback_submit_es': False, 'toggle_graph': False, 'entity_graph':
'data:image/png;base64,<base64-encoded PNG image data omitted (Matplotlib 3.8.3 render of the entity graph)>'
ASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAkAgsAACCJwAIAAEgisAAAAJIILAAAgCQCCwAAIInAAgAASCKwAAAAkggsAACAJAILAAAgicACAABIIrAAAACSCCwAAIAk/w+bOBNUMZVrXAAAAABJRU5ErkJggg==', 'graph_id': 'semantic-float-4a0c84f3', 'semantic_file_uploader_sebastian.marroquin@aideatext.ai': None, 'delete_Uso de stanza en el análisis sintác.txt': False, 'page': 'user'} diff --git a/modules/semantic/semantic_float.py b/modules/semantic/semantic_float.py new file mode 100644 index 0000000000000000000000000000000000000000..043ab99ab13630b25c8bbbedb4a734b627e4a337 --- /dev/null +++ b/modules/semantic/semantic_float.py @@ -0,0 +1,213 @@ +import streamlit as st +import uuid +import streamlit.components.v1 as 
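# A minimal usage sketch for the helpers this module defines (assuming a running
# Streamlit page; the HTML snippets are placeholders, not part of this module):
#
#   semantic_float_init()                                # inject the float styles once
#   box_id = float_graph("<p>graph here</p>", position="bottom-right", shadow=1)
#   update_float_content(box_id, "<p>new content</p>")   # swap the box contents
#   toggle_float_visibility(box_id, False)               # hide the box again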
+ + +''' + +# Shadow and transition style presets (unchanged) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + "center-right": "top: 50%; right: 20px; transform: translateY(-50%);" + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + components.html(f""" +
+
+ {content} +
+
+ + """, height=0) + return box_id + +def toggle_float_visibility(box_id, visible): + display = "block" if visible else "none" + components.html(f""" + + """, height=0) + +def update_float_content(box_id, new_content): + components.html(f""" + + """, height=0) +''' + + +# Lista de estilos de sombra (puedes ajustar según tus preferencias) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + """Inicializa los estilos necesarios para los elementos flotantes en la interfaz semántica.""" + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + """ + Crea un contenedor flotante para el gráfico de visualización semántica. + + :param content: Contenido HTML o Markdown para el gráfico + :param width: Ancho del contenedor + :param height: Altura del contenedor + :param position: Posición del contenedor ('top-left', 'top-right', 'bottom-left', 'bottom-right') + :param shadow: Índice del estilo de sombra a utilizar + :param transition: Índice del estilo de transición a utilizar + """ + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + """ + Crea un contenedor flotante genérico. + + :param content: Contenido HTML o Markdown para el contenedor + :param css: Estilos CSS adicionales + """ + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + st.markdown(f""" +
+
+ {content} +
+
+ """, unsafe_allow_html=True) + return box_id + +def toggle_float_visibility(box_id, visible): + """ + Cambia la visibilidad de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param visible: True para mostrar, False para ocultar + """ + display = "block" if visible else "none" + st.markdown(f""" + + """, unsafe_allow_html=True) + +def update_float_content(box_id, new_content): + """ + Actualiza el contenido de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param new_content: Nuevo contenido HTML o Markdown + """ + st.markdown(f""" + + """, unsafe_allow_html=True) + +# Puedes agregar más funciones específicas para la interfaz semántica según sea necesario \ No newline at end of file diff --git a/modules/semantic/semantic_float68ok.py b/modules/semantic/semantic_float68ok.py new file mode 100644 index 0000000000000000000000000000000000000000..a57a08d49e3c3945b90a1a358305e520a6e1d650 --- /dev/null +++ b/modules/semantic/semantic_float68ok.py @@ -0,0 +1,467 @@ +import streamlit as st +import uuid +import streamlit.components.v1 as components +import streamlit.components.v1 as stc + +########################## PRUEBA 1 ######################### + # COMBINADO CON SEMANCTIC_INTERFACE_68OK APARECEN DOS BOX FLOTANTES +# Lista de estilos de sombra (puedes ajustar según tus preferencias) + +''' +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + +#################################################### +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + } + css = f""" + width: {width}; + height: {height}; + position: fixed; + z-index: 9999; + background-color: white; + border: 1px solid #ddd; + padding: 10px; + overflow: auto; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + return float_box(content, css=css) + +######################################################### +def float_box(content, css=""): + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + st.markdown(f""" +
+ {content} +
+ """, unsafe_allow_html=True) + return box_id + +######################################################### + +def toggle_float_visibility(box_id, visible): + """ + Cambia la visibilidad de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param visible: True para mostrar, False para ocultar + """ + display = "block" if visible else "none" + st.markdown(f""" + + """, unsafe_allow_html=True) + +########################################################### +def update_float_content(box_id, new_content): + """ + Actualiza el contenido de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param new_content: Nuevo contenido HTML o Markdown + """ + st.markdown(f""" + + """, unsafe_allow_html=True) + +# Puedes agregar más funciones específicas para la interfaz semántica según sea necesario +''' + +################################################# version backup ######################### + # COMBINADO CON SEMANCTIC_INTERFACE_68OK APARECEN SOLO UN CUADRO A LA DERECJHA Y AL CENTRO + # Lista de estilos de sombra (puedes ajustar según tus preferencias) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + + +def semantic_float_init(): + """Inicializa los estilos necesarios para los elementos flotantes en la interfaz semántica.""" + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + """ + Crea un contenedor flotante para el gráfico de visualización semántica. + + :param content: Contenido HTML o Markdown para el gráfico + :param width: Ancho del contenedor + :param height: Altura del contenedor + :param position: Posición del contenedor ('top-left', 'top-right', 'bottom-left', 'bottom-right') + :param shadow: Índice del estilo de sombra a utilizar + :param transition: Índice del estilo de transición a utilizar + """ + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + """ + Crea un contenedor flotante genérico. + + :param content: Contenido HTML o Markdown para el contenedor + :param css: Estilos CSS adicionales + """ + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + st.markdown(f""" +
+
+ {content} +
+
+ """, unsafe_allow_html=True) + return box_id + +def toggle_float_visibility(box_id, visible): + """ + Cambia la visibilidad de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param visible: True para mostrar, False para ocultar + """ + display = "block" if visible else "none" + st.markdown(f""" + + """, unsafe_allow_html=True) + +def update_float_content(box_id, new_content): + """ + Actualiza el contenido de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param new_content: Nuevo contenido HTML o Markdown + """ + st.markdown(f""" + + """, unsafe_allow_html=True) + +# Puedes agregar más funciones específicas para la interfaz semántica según sea necesario +#################FIN BLOQUE DEL BACK UP################################################# + + + + + + + + + + + + + + + + + + + + + +''' +############ TEST ######################################### +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="center-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + "center-right": "top: 50%; right: 20px; transform: translateY(-50%);" + } + + css = f""" + position: fixed; + width: {width}; + height: {height}; + {position_css.get(position, position_css['center-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + z-index: 9999; + display: block !important; + background-color: white; + border: 1px solid #ddd; + border-radius: 5px; + padding: 10px; + overflow: auto; + """ + + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + html_content = f""" +
+ {content} +
+ + """ + + components.html(html_content, height=600, scrolling=True) + return box_id + +def toggle_float_visibility(box_id, visible): + display = "block" if visible else "none" + components.html(f""" + + """, height=0) + +def update_float_content(box_id, new_content): + components.html(f""" + + """, height=0) + + + + + + + + + + +############BackUp ######################################### + + + + + + + + + + + + + + + + + + + + + + + + +# Lista de estilos de sombra y transición (sin cambios) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + "center-right": "top: 50%; right: 20px; transform: translateY(-50%);" + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + components.html(f""" +
+
+ {content} +
+
+ + """, height=0) + return box_id + +def toggle_float_visibility(box_id, visible): + display = "block" if visible else "none" + components.html(f""" + + """, height=0) + +def update_float_content(box_id, new_content): + components.html(f""" + + """, height=0) +''' \ No newline at end of file diff --git a/modules/semantic/semantic_float_old.py b/modules/semantic/semantic_float_old.py new file mode 100644 index 0000000000000000000000000000000000000000..192c7a46004ab8b35c2046cde482a001088475c7 --- /dev/null +++ b/modules/semantic/semantic_float_old.py @@ -0,0 +1,220 @@ +import streamlit as st +import uuid +import streamlit.components.v1 as components +import base64 + +''' + +# Lista de estilos de sombra y transición (sin cambios) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + "center-right": "top: 50%; right: 20px; transform: translateY(-50%);" + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + components.html(f""" +
+
+ {content} +
+
+ + """, height=0) + return box_id + +def toggle_float_visibility(box_id, visible): + display = "block" if visible else "none" + components.html(f""" + + """, height=0) + +def update_float_content(box_id, new_content): + components.html(f""" + + """, height=0) +''' + + +# Lista de estilos de sombra (puedes ajustar según tus preferencias) +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + + +def encode_image_to_base64(image_path): + with open(image_path, "rb") as image_file: + encoded_string = base64.b64encode(image_file.read()).decode("utf-8") + return f"data:image/png;base64,{encoded_string}" + + +def semantic_float_init(): + """Inicializa los estilos necesarios para los elementos flotantes en la interfaz semántica.""" + st.markdown(""" + + """, unsafe_allow_html=True) + +def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=0, transition=0): + """ + Crea un contenedor flotante para el gráfico de visualización semántica. + + :param content: Contenido HTML o Markdown para el gráfico + :param width: Ancho del contenedor + :param height: Altura del contenedor + :param position: Posición del contenedor ('top-left', 'top-right', 'bottom-left', 'bottom-right') + :param shadow: Índice del estilo de sombra a utilizar + :param transition: Índice del estilo de transición a utilizar + """ + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + } + + css = f""" + width: {width}; + height: {height}; + {position_css.get(position, position_css['bottom-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + """ + + return float_box(content, css=css) + +def float_box(content, css=""): + """ + Crea un contenedor flotante genérico. + + :param content: Contenido HTML o Markdown para el contenedor + :param css: Estilos CSS adicionales + """ + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + st.markdown(f""" +
+
+ {content} +
+
+ """, unsafe_allow_html=True) + return box_id + +def toggle_float_visibility(box_id, visible): + """ + Cambia la visibilidad de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param visible: True para mostrar, False para ocultar + """ + display = "block" if visible else "none" + st.markdown(f""" + + """, unsafe_allow_html=True) + +def update_float_content(box_id, new_content): + """ + Actualiza el contenido de un contenedor flotante. + + :param box_id: ID del contenedor flotante + :param new_content: Nuevo contenido HTML o Markdown + """ + st.markdown(f""" + + """, unsafe_allow_html=True) + +# Puedes agregar más funciones específicas para la interfaz semántica según sea necesario diff --git a/modules/semantic/semantic_float_reset.py b/modules/semantic/semantic_float_reset.py new file mode 100644 index 0000000000000000000000000000000000000000..1d782eb27f4493283de556391ef49334ed6e7256 --- /dev/null +++ b/modules/semantic/semantic_float_reset.py @@ -0,0 +1,94 @@ +import streamlit as st +import uuid +import streamlit.components.v1 as components +import base64 + +# Lista de estilos de sombra +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", + +] + +################################################################################### +def semantic_float_init(): + st.markdown(""" + + """, unsafe_allow_html=True) + + components.html(""" +
+ + """, height=0) + +def float_graph(content): + js = f""" + + """ + components.html(js, height=0) + +def toggle_float_visibility(visible): + js = f""" + + """ + components.html(js, height=0) + +def update_float_content(new_content): + js = f""" + + """ + components.html(js, height=0) \ No newline at end of file diff --git a/modules/semantic/semantic_float_reset_23-9-2024.py b/modules/semantic/semantic_float_reset_23-9-2024.py new file mode 100644 index 0000000000000000000000000000000000000000..5d8fb602fec66518348fcfa37e1a272284a7adf4 --- /dev/null +++ b/modules/semantic/semantic_float_reset_23-9-2024.py @@ -0,0 +1,128 @@ +import streamlit as st +import uuid +import streamlit.components.v1 as components +import base64 + +# Lista de estilos de sombra +shadow_list = [ + "box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 12px;", + "box-shadow: rgba(0, 0, 0, 0.15) 0px 5px 15px 0px;", + "box-shadow: rgba(0, 0, 0, 0.05) 0px 6px 24px 0px, rgba(0, 0, 0, 0.08) 0px 0px 0px 1px;", + "box-shadow: rgba(0, 0, 0, 0.16) 0px 10px 36px 0px, rgba(0, 0, 0, 0.06) 0px 0px 0px 1px;", +] + +# Lista de estilos de transición +transition_list = [ + "transition: all 0.3s ease;", + "transition: all 0.5s cubic-bezier(0.25, 0.8, 0.25, 1);", + "transition: all 0.4s cubic-bezier(0.165, 0.84, 0.44, 1);", +] + +def semantic_float_init(): + components.html(""" + + """, height=0) + +def float_graph(content, width="40%", height="60%", position="center-right", shadow=0, transition=0): + position_css = { + "top-left": "top: 20px; left: 20px;", + "top-right": "top: 20px; right: 20px;", + "bottom-left": "bottom: 20px; left: 20px;", + "bottom-right": "bottom: 20px; right: 20px;", + "center-right": "top: 50%; right: 20px; transform: translateY(-50%);" + } + + css = f""" + position: fixed; + width: {width}; + height: {height}; + {position_css.get(position, position_css['center-right'])} + {shadow_list[shadow % len(shadow_list)]} + {transition_list[transition % len(transition_list)]} + z-index: 9999; + display: block !important; + background-color: white; + border: 1px solid #ddd; + border-radius: 5px; + padding: 10px; + overflow: auto; + """ + + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + components.html(f""" +
+ {content} +
+ + """, height=0) + return box_id + +def float_box(content, css=""): + box_id = f"semantic-float-{str(uuid.uuid4())[:8]}" + components.html(f""" +
+ {content} +
+ + """, height=0) + return box_id + +def toggle_float_visibility(box_id, visible): + display = "block" if visible else "none" + components.html(f""" + + """, height=0) + +def update_float_content(box_id, new_content): + components.html(f""" + + """, height=0) \ No newline at end of file diff --git a/modules/semantic/semantic_interfaceBackUp_2092024_1800.py b/modules/semantic/semantic_interfaceBackUp_2092024_1800.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ef8533a44841e7fdcc66abd8b4c7a25b9e2914 --- /dev/null +++ b/modules/semantic/semantic_interfaceBackUp_2092024_1800.py @@ -0,0 +1,146 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + st.markdown(""" + + """, unsafe_allow_html=True) + + tab1, tab2, tab3, tab4, tab5 = st.tabs(["Upload", "Analyze", "Results", "Chat", "Export"]) + + with tab1: + tab21, tab22 = st.tabs(["File Management", "File Analysis"]) + + with tab21: + st.subheader("Upload and Manage Files") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.write("No files uploaded yet.") + + with tab22: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) 
+ st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + with tab2: + st.subheader("Analysis Results") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + col1, col2 = st.columns(2) + with col1: + if 'concept_graph' in st.session_state: + st.subheader("Concept Graph") + st.pyplot(st.session_state.concept_graph) + with col2: + if 'entity_graph' in st.session_state: + st.subheader("Entity Graph") + st.pyplot(st.session_state.entity_graph) + + with tab3: + st.subheader("Chat with AI") + chat_container = st.container() + + with chat_container: + chat_history = st.session_state.get('semantic_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + + user_input = st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + if user_input: + chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code) + + chat_history.append({"role": "assistant", "content": response}) + st.session_state.semantic_chat_history = chat_history + + if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() + + with tab4: + st.subheader("Export Results") + # Add export functionality here + + with tab5: + st.subheader("Help") + # Add help information here \ No newline at end of file diff --git a/modules/semantic/semantic_interfaceBorrados.py b/modules/semantic/semantic_interfaceBorrados.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2167adff34762e28fbd9ee65c64dd371ef713c --- /dev/null +++ b/modules/semantic/semantic_interfaceBorrados.py @@ -0,0 +1,196 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', 
'')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") diff --git a/modules/semantic/semantic_interfaceKoKo.py b/modules/semantic/semantic_interfaceKoKo.py new file mode 100644 index 0000000000000000000000000000000000000000..3a704b30129e521564b9222face9ec5c818bafea --- /dev/null +++ b/modules/semantic/semantic_interfaceKoKo.py @@ -0,0 +1,239 @@ +import streamlit as st +from streamlit_float import * +import logging +import sys +import io +from io import BytesIO +from datetime import datetime +import re +import base64 +import matplotlib.pyplot as plt +import plotly.graph_objects as go +import pandas as pd +import numpy as np + +from .flexible_analysis_handler import FlexibleAnalysisHandler + +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +from .semantic_process import process_semantic_analysis + +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import manage_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + + +semantic_float_init() +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + + +## +def fig_to_base64(fig): + buf = io.BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + img_str = base64.b64encode(buf.getvalue()).decode() + return f'' +## + + +def display_semantic_interface(lang_code, nlp_models, t): + #st.set_page_config(layout="wide") + + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + if 'show_graph' not in st.session_state: + st.session_state.show_graph = False + + if 'graph_id' not in st.session_state: + st.session_state.graph_id = None + + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + if 'show_graph' not in st.session_state: + st.session_state.show_graph = False + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f""" +
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + col1, col2 = st.columns([2, 1]) + + with col1: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + st.markdown('
', unsafe_allow_html=True) + for message in reversed(st.session_state.semantic_chat_history): + with st.chat_message(message["role"]): + st.markdown(message["content"]) + st.markdown('
', unsafe_allow_html=True) + + st.markdown('
', unsafe_allow_html=True) + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + st.markdown('
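# Chat flow: append the user's turn, route '/analyze_current' commands to the
# semantic handler (any other message goes to the general chatbot, with the
# loaded file contents as context), append the reply, then rerun so the
# transcript above refreshes.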
+ + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col2: + st.subheader("Document Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("Select a file to analyze", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = manage_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + logger.debug("Calling process_semantic_analysis") + analysis_result = process_semantic_analysis(file_contents, nlp_model, lang_code) + + # Create a FlexibleAnalysisHandler instance with the analysis results + handler = FlexibleAnalysisHandler(analysis_result) + + logger.debug(f"Type of analysis_result: {type(analysis_result)}") + logger.debug(f"Keys in analysis_result: {analysis_result.keys() if isinstance(analysis_result, dict) else 'Not a dict'}") + + st.session_state.concept_graph = handler.get_concept_graph() + st.session_state.entity_graph = handler.get_entity_graph() + st.session_state.key_concepts = handler.get_key_concepts() + st.session_state.show_graph = True + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("File Management") + + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if manage_file_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + + st.subheader("Manage Uploaded Files") + + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + ######################################################################################################################### + # Floating graph visualization + if st.session_state.show_graph: + if st.session_state.graph_id is None: + st.session_state.graph_id = float_graph( + content="<div>Loading graph...</div>", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + ) + + graph_id = st.session_state.graph_id
+ + if 'key_concepts' in st.session_state: + key_concepts_html = "<div><strong>Key Concepts:</strong><br>" + ', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts]) + "</div>
" + update_float_content(graph_id, key_concepts_html) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + update_float_content(graph_id, st.session_state.concept_graph) + else: + update_float_content(graph_id, "No concept graph available.") + + with tab_entity: + if 'entity_graph' in st.session_state: + update_float_content(graph_id, st.session_state.entity_graph) + else: + update_float_content(graph_id, "No entity graph available.") + + if st.button("Close Graph", key="close_graph"): + toggle_float_visibility(graph_id, False) + st.session_state.show_graph = False + st.session_state.graph_id = None + st.rerun() \ No newline at end of file diff --git a/modules/semantic/semantic_interfaceSideBar.py b/modules/semantic/semantic_interfaceSideBar.py new file mode 100644 index 0000000000000000000000000000000000000000..79f0777328d68330ea531f7104abbf8a4ab0fdfb --- /dev/null +++ b/modules/semantic/semantic_interfaceSideBar.py @@ -0,0 +1,207 @@ +import streamlit as st +from streamlit_float import * +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Sidebar for chat + with st.sidebar: + st.subheader("Chat with AI") + + messages = st.container(height=400) + + # Display chat messages + for message in st.session_state.semantic_chat_history: + with messages.chat_message(message["role"]): + st.markdown(message["content"]) + + # Chat input + if prompt := st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')): + st.session_state.semantic_chat_history.append({"role": "user", "content": prompt}) + + with messages.chat_message("user"): + st.markdown(prompt) + + with messages.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + if prompt.startswith('/analyze_current'): + assistant_response = process_semantic_chat_input(prompt, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + assistant_response = st.session_state.semantic_chatbot.generate_response(prompt, lang_code, context=st.session_state.get('file_contents', '')) + + # Simulate stream of response with milliseconds delay + for chunk in assistant_response.split(): + full_response += chunk + " " + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": full_response}) + + if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() + + # Main content area + st.title("Semantic Analysis") + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Visualization + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_1.py b/modules/semantic/semantic_interface_1.py new file mode 100644 index 0000000000000000000000000000000000000000..432858c935c551fef038a0d87eebc8602e139672 --- /dev/null +++ b/modules/semantic/semantic_interface_1.py @@ -0,0 +1,55 @@ +import streamlit as st +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot +from ..database.database_oldFromV2 import store_semantic_result +from ..text_analysis.semantic_analysis import perform_semantic_analysis +from ..utils.widget_utils import generate_unique_key + +def display_semantic_interface(lang_code, nlp_models, t): + st.subheader(t['title']) + + # Inicializar el chatbot si no existe + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + # Sección para cargar archivo + uploaded_file = st.file_uploader(t['file_uploader'], type=['txt', 'pdf', 'docx', 'doc', 'odt']) + if uploaded_file: + file_contents = uploaded_file.getvalue().decode('utf-8') + st.session_state.file_contents = file_contents + + # Mostrar el historial del chat + chat_history = st.session_state.get('semantic_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualization" in message: + st.pyplot(message["visualization"]) + + # Input del usuario + user_input = st.chat_input(t['semantic_initial_message'], key=generate_unique_key('semantic', st.session_state.username)) + + if user_input: + # Procesar el input del usuario + response, visualization = process_semantic_analysis(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents'), t) + + # Actualizar el historial del chat + chat_history.append({"role": "user", "content": user_input}) + chat_history.append({"role": "assistant", "content": response, "visualization": visualization}) + st.session_state.semantic_chat_history = chat_history + + # Mostrar el resultado más reciente + with st.chat_message("assistant"): + st.write(response) + if visualization: + st.pyplot(visualization) + + # Guardar el resultado en la base de datos si es un análisis + if user_input.startswith('/analisis_semantico'): + result = perform_semantic_analysis(st.session_state.file_contents, nlp_models[lang_code], lang_code) + store_semantic_result(st.session_state.username, st.session_state.file_contents, result) + + # Botón para limpiar el historial del chat + if st.button(t['clear_chat'], key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() \ No newline at end of file diff --git a/modules/semantic/semantic_interface_2.py b/modules/semantic/semantic_interface_2.py new file mode 100644 index 0000000000000000000000000000000000000000..351f0319ae784b409a289b112c79caac25a3fbc3 --- /dev/null +++ b/modules/semantic/semantic_interface_2.py @@ -0,0 +1,167 @@ +import streamlit as st +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + #st.set_page_config(layout="wide") + + # Estilo CSS personalizado + st.markdown(""" + + """, 
unsafe_allow_html=True) + + # Display the initial message as a styled paragraph + st.markdown(f""" + <div> + {get_translation(t, 'semantic_initial_message', 'Welcome to the semantic analysis interface.')} + </div> + """, unsafe_allow_html=True)
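+ # Note: generate_unique_key is imported from ..utils.widget_utils but is not part of this patch. + # From its call sites it joins two identifying strings into a stable Streamlit widget key; + # a minimal sketch, assuming simple concatenation: + # def generate_unique_key(module: str, suffix: str) -> str: + #     return f"{module}_{suffix}"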
+ """, unsafe_allow_html=True) + + # Inicializar el chatbot si no existe + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + # Contenedor para la gestión de archivos + with st.container(): + st.markdown('
', unsafe_allow_html=True) + col1, col2, col3, col4 = st.columns(4) + + with col1: + if st.button(get_translation(t, 'upload_file', 'Upload File'), key=generate_unique_key('semantic', 'upload_button')): + uploaded_file = st.file_uploader(get_translation(t, 'file_uploader', 'Choose a file'), type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(get_translation(t, 'file_uploaded_success', 'File uploaded and saved to database successfully')) + st.session_state.file_contents = file_contents + st.rerun() + else: + st.error(get_translation(t, 'file_upload_error', 'Error uploading file')) + + with col2: + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_file', 'Select a file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox(get_translation(t, 'file_list', 'File List'), options=file_options, key=generate_unique_key('semantic', 'file_selector')) + if selected_file != get_translation(t, 'select_file', 'Select a file'): + if st.button(get_translation(t, 'load_file', 'Load File'), key=generate_unique_key('semantic', 'load_file')): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + st.success(get_translation(t, 'file_loaded_success', 'File loaded successfully')) + else: + st.error(get_translation(t, 'file_load_error', 'Error loading file')) + + with col3: + if st.button(get_translation(t, 'analyze_document', 'Analyze Document'), key=generate_unique_key('semantic', 'analyze_document')): + if 'file_contents' in st.session_state: + with st.spinner(get_translation(t, 'analyzing', 'Analyzing...')): + graph, key_concepts = process_semantic_analysis(st.session_state.file_contents, nlp_models[lang_code], lang_code) + st.session_state.graph = graph + st.session_state.key_concepts = key_concepts + st.success(get_translation(t, 'analysis_completed', 'Analysis completed')) + else: + st.error(get_translation(t, 'no_file_uploaded', 'No file uploaded')) + + with col4: + if st.button(get_translation(t, 'delete_file', 'Delete File'), key=generate_unique_key('semantic', 'delete_file')): + if selected_file and selected_file != get_translation(t, 'select_file', 'Select a file'): + if delete_file(st.session_state.username, selected_file, 'semantic'): + st.success(get_translation(t, 'file_deleted_success', 'File deleted successfully')) + if 'file_contents' in st.session_state: + del st.session_state.file_contents + st.rerun() + else: + st.error(get_translation(t, 'file_delete_error', 'Error deleting file')) + else: + st.error(get_translation(t, 'no_file_selected', 'No file selected')) + + st.markdown('
', unsafe_allow_html=True) + + # Crear dos columnas: una para el chat y otra para la visualización + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader(get_translation(t, 'chat_title', 'Semantic Analysis Chat')) + # Chat interface + chat_container = st.container() + + with chat_container: + # Mostrar el historial del chat + chat_history = st.session_state.get('semantic_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + + # Input del usuario + user_input = st.chat_input(get_translation(t, 'semantic_chat_input', 'Type your message here...'), key=generate_unique_key('semantic', 'chat_input')) + + if user_input: + # Añadir el mensaje del usuario al historial + chat_history.append({"role": "user", "content": user_input}) + + # Generar respuesta del chatbot + chatbot = st.session_state.semantic_chatbot + response = chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents')) + + # Añadir la respuesta del chatbot al historial + chat_history.append({"role": "assistant", "content": response}) + + # Actualizar el historial en session_state + st.session_state.semantic_chat_history = chat_history + + # Forzar la actualización de la interfaz + st.rerun() + + with col_graph: + st.subheader(get_translation(t, 'graph_title', 'Semantic Graph')) + + # Mostrar conceptos clave en un expander horizontal + with st.expander(get_translation(t, 'key_concepts_title', 'Key Concepts'), expanded=True): + if 'key_concepts' in st.session_state: + st.markdown('
<div>', unsafe_allow_html=True) + for concept, freq in st.session_state.key_concepts: + st.markdown(f'<span>{concept}: {freq:.2f}</span>', unsafe_allow_html=True) + st.markdown('</div>
', unsafe_allow_html=True) + + if 'graph' in st.session_state: + st.pyplot(st.session_state.graph) + + # Botón para limpiar el historial del chat + if st.button(get_translation(t, 'clear_chat', 'Clear chat'), key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() \ No newline at end of file diff --git a/modules/semantic/semantic_interface_2192024_1632.py b/modules/semantic/semantic_interface_2192024_1632.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2aff2f6a40d46999fd4548dd5697dd09f16e80 --- /dev/null +++ b/modules/semantic/semantic_interface_2192024_1632.py @@ -0,0 +1,244 @@ +import streamlit as st +import logging +import time +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization --1 + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + # Create a container for the chat messages + chat_container = st.container() + + # Display chat messages from history on app rerun + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = 
process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + +''' + # Accept user input + if prompt := st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')): + # Add user message to chat history + st.session_state.semantic_chat_history.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Generate and display assistant response + with st.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + if prompt.startswith('/analyze_current'): + assistant_response = process_semantic_chat_input(prompt, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + assistant_response = st.session_state.semantic_chatbot.generate_response(prompt, lang_code, context=st.session_state.get('file_contents', '')) + + # Simulate stream of response with milliseconds delay + for chunk in assistant_response.split(): + full_response += chunk + " " + time.sleep(0.05) + # Add a blinking cursor to simulate typing + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + + # Add assistant response to chat history + st.session_state.semantic_chat_history.append({"role": "assistant", "content": full_response}) + + # Add a clear chat button + if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [{"role": "assistant", "content": "Chat cleared. How can I assist you?"}] + st.rerun() + +''' + +''' + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") +''' \ No newline at end of file diff --git a/modules/semantic/semantic_interface_3.py b/modules/semantic/semantic_interface_3.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f4b7392ea4e041d6885d07aa76e8209c6d03a9 --- /dev/null +++ b/modules/semantic/semantic_interface_3.py @@ -0,0 +1,182 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f""" +
+ <div> + {get_translation(t, 'semantic_initial_message', 'Welcome to the semantic analysis interface.')} + </div> + """, unsafe_allow_html=True)
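+ # Note: process_semantic_analysis comes from .semantic_process, which this patch does not include. + # From the call sites its assumed contract is: + #     concept_graph, entity_graph, key_concepts = process_semantic_analysis(text, nlp_model, lang_code) + # where key_concepts is a list of (concept, frequency) pairs and the two graphs are matplotlib + # figures here (rendered with st.pyplot below); later interface versions render them with + # st.image, which suggests the function was changed to return base64-encoded PNGs instead.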
+ """, unsafe_allow_html=True) + + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + # Contenedor para la gestión de archivos + with st.container(): + st.markdown('
', unsafe_allow_html=True) + col1, col2, col3, col4 = st.columns(4) + + with col1: + uploaded_file = st.file_uploader(get_translation(t, 'upload_file', 'Upload File'), type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.session_state.file_contents = file_contents + st.success(get_translation(t, 'file_uploaded_success', 'File uploaded and saved successfully')) + st.rerun() + else: + st.error(get_translation(t, 'file_upload_error', 'Error uploading file')) + + with col2: + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + st.success(get_translation(t, 'file_loaded_success', 'File loaded successfully')) + else: + st.error(get_translation(t, 'file_load_error', 'Error loading file')) + + with col3: + if st.button(get_translation(t, 'analyze_document', 'Analyze Document'), key=generate_unique_key('semantic', 'analyze_document')): + if 'file_contents' in st.session_state: + with st.spinner(get_translation(t, 'analyzing', 'Analyzing...')): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(st.session_state.file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success(get_translation(t, 'analysis_completed', 'Analysis completed')) + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error(get_translation(t, 'no_file_uploaded', 'No file uploaded')) + + with col4: + if st.button(get_translation(t, 'delete_file', 'Delete File'), key=generate_unique_key('semantic', 'delete_file')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + if delete_file(st.session_state.username, selected_file, 'semantic'): + st.success(get_translation(t, 'file_deleted_success', 'File deleted successfully')) + if 'file_contents' in st.session_state: + del st.session_state.file_contents + st.rerun() + else: + st.error(get_translation(t, 'file_delete_error', 'Error deleting file')) + else: + st.error(get_translation(t, 'no_file_selected', 'No file selected')) + + st.markdown('
</div>', unsafe_allow_html=True) + + # Container for the analysis section + st.markdown('<div>
', unsafe_allow_html=True) + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader(get_translation(t, 'chat_title', 'Semantic Analysis Chat')) + chat_container = st.container() + + with chat_container: + chat_history = st.session_state.get('semantic_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + + user_input = st.chat_input(get_translation(t, 'semantic_chat_input', 'Type your message here...'), key=generate_unique_key('semantic', 'chat_input')) + + if user_input: + chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code) + + chat_history.append({"role": "assistant", "content": response}) + st.session_state.semantic_chat_history = chat_history + + with col_graph: + st.subheader(get_translation(t, 'graph_title', 'Semantic Graphs')) + + # Mostrar conceptos clave y entidades horizontalmente + if 'key_concepts' in st.session_state: + st.write(get_translation(t, 'key_concepts_title', 'Key Concepts')) + st.markdown('
<div>', unsafe_allow_html=True) + for concept, freq in st.session_state.key_concepts: + st.markdown(f'<span>{concept}: {freq:.2f}</span>', unsafe_allow_html=True) + st.markdown('</div>', unsafe_allow_html=True) + + if 'entities' in st.session_state: + st.write(get_translation(t, 'entities_title', 'Entities')) + st.markdown('<div>', unsafe_allow_html=True) + for entity, entity_type in st.session_state.entities.items(): + st.markdown(f'<span>{entity}: {entity_type}</span>', unsafe_allow_html=True) + st.markdown('</div>', unsafe_allow_html=True) + + # Use tabs to display the graphs + tab1, tab2 = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab1: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + + with tab2: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + + st.markdown('</div>
', unsafe_allow_html=True) + + if st.button(get_translation(t, 'clear_chat', 'Clear chat'), key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() \ No newline at end of file diff --git a/modules/semantic/semantic_interface_4.py b/modules/semantic/semantic_interface_4.py new file mode 100644 index 0000000000000000000000000000000000000000..e984b533ef5c21debf78bf317456523ffa13928a --- /dev/null +++ b/modules/semantic/semantic_interface_4.py @@ -0,0 +1,188 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + st.markdown('
<div>', unsafe_allow_html=True) + chat_history = st.session_state.get('semantic_chat_history', []) + for message in chat_history: + with st.chat_message(message["role"]): + st.write(message["content"])
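+ # Note: initialize_chatbot is imported from ..chatbot.chatbot and is not shown in this patch. + # From its call sites the assumed interface is an object whose generate_response method takes + # the user input, a language code and an optional context string, returning the reply text: + #     chatbot = initialize_chatbot('semantic') + #     reply = chatbot.generate_response(user_input, lang_code, context=file_contents) + # (this version calls it without context, unlike semantic_interface_2/5/6 — possibly an oversight)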
st.markdown('</div>', unsafe_allow_html=True) + + user_input = st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + if user_input: + chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code) + + chat_history.append({"role": "assistant", "content": response}) + st.session_state.semantic_chat_history = chat_history + + if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + st.markdown('<div>
', unsafe_allow_html=True) + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") + st.markdown('</div>
', unsafe_allow_html=True) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_5.py b/modules/semantic/semantic_interface_5.py new file mode 100644 index 0000000000000000000000000000000000000000..b9d8a0e565a92c0c140a7af0f672cf489b50ddb9 --- /dev/null +++ b/modules/semantic/semantic_interface_5.py @@ -0,0 +1,195 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Estilo CSS personalizado + st.markdown(""" + + """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = 
key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_6.py b/modules/semantic/semantic_interface_6.py new file mode 100644 index 0000000000000000000000000000000000000000..38df59957615774686e19ed33325ff346f948c7b --- /dev/null +++ b/modules/semantic/semantic_interface_6.py @@ -0,0 +1,223 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + + # Crear el grafo flotante + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph( + content="
<div>Loading graph...</div>", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + ) + + # Update the floating graph content

+ update_float_content(st.session_state.graph_id, f""" + <div><strong>Key Concepts:</strong></div> + <div>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</div> + <img src="data:image/png;base64,{concept_graph}" style="width: 100%;" />
+ Concept Graph + """) + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + with st.expander("Chat with AI", expanded=True): + chat_container = st.container() + + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state and st.session_state.concept_graph: + st.image(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state and st.session_state.entity_graph: + st.image(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") + + # Botón para cerrar el grafo flotante + if st.button("Close Graph", key="close_graph"): + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, False) + del st.session_state.graph_id \ No newline at end of file diff --git a/modules/semantic/semantic_interface_61.py b/modules/semantic/semantic_interface_61.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ac1e16628009ab14da1eb7cf94c967a22805ea --- /dev/null +++ b/modules/semantic/semantic_interface_61.py @@ -0,0 +1,198 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + col_left, col_right = st.columns([1, 1]) + + with col_left: + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph( + content="
<div>Loading graph...</div>", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + )

+ update_float_content(st.session_state.graph_id, f""" + <div><strong>Key Concepts:</strong></div> + <div>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</div> + <img src="data:image/png;base64,{concept_graph}" style="width: 100%;" />
+ Concept Graph + """) + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([2, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + toggle_float_visibility(st.session_state.graph_id, not st.session_state.get('graph_visible', True)) + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_right: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + with tab_concept: + if 'concept_graph' in st.session_state and st.session_state.concept_graph: + st.image(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + with tab_entity: + if 'entity_graph' in st.session_state and st.session_state.entity_graph: + st.image(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_610.py b/modules/semantic/semantic_interface_610.py new file mode 100644 index 0000000000000000000000000000000000000000..7584017bdca599b7345e9728e5cdd887be94c885 --- /dev/null +++ b/modules/semantic/semantic_interface_610.py @@ -0,0 +1,186 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Crear o actualizar el elemento flotante con el grafo + graph_content = f""" +

<div><strong>Key Concepts:</strong></div> + <div>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</div> + <img src="data:image/png;base64,{concept_graph}" style="width: 100%;" />
+ Concept Graph + """ + st.session_state.graph_id = float_graph(graph_content, width="30%", height="80%", position="center-right", shadow=2) + st.session_state.graph_visible = True + + # Depuración: Mostrar los primeros 100 caracteres del grafo + st.write(f"Debug: Concept graph base64 (first 100 chars): {concept_graph[:100]}") + st.write(f"Debug: Graph ID: {st.session_state.graph_id}") + + except Exception as e: + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + + # Mostrar el historial del chat + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom
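+ # Note: the '/analyze_current' branch below delegates to process_semantic_chat_input + # (..chatbot.chatbot), which this patch does not show. Its assumed behaviour, inferred + # from the call site: strip the command prefix, run the semantic analysis over the + # current file contents and return the result as display text, e.g. + #     response = process_semantic_chat_input(user_input, lang_code, nlp_model, file_contents)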
st.markdown('<div>', unsafe_allow_html=True) + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + + +# Al final del archivo, después de todo el código: +if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + components.html(f""" + + """, height=0) + +# Añadir un botón para alternar la visibilidad del grafo +if st.button("Toggle Graph Visibility"): + st.session_state.graph_visible = not st.session_state.get('graph_visible', False) + if st.session_state.graph_visible: + st.write("Graph should be visible now") + else: + st.write("Graph should be hidden now") + st.experimental_rerun() \ No newline at end of file diff --git a/modules/semantic/semantic_interface_62.py b/modules/semantic/semantic_interface_62.py new file mode 100644 index 0000000000000000000000000000000000000000..2cf56020a9772617f5f09a69450887c7e50614a8 --- /dev/null +++ b/modules/semantic/semantic_interface_62.py @@ -0,0 +1,206 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + col_left, col_right = st.columns([3, 2]) + + with col_left: + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Crear o actualizar el grafo flotante + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph( + content="
<div>Loading graph...</div>", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + ) + update_float_content(st.session_state.graph_id, f""" +

<div><strong>Key Concepts:</strong></div> + <div>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</div> + <img src="data:image/png;base64,{concept_graph}" style="width: 100%;" />
+ Concept Graph + """) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + st.markdown('
<div>', unsafe_allow_html=True) + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + st.markdown('</div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat and Graph", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + if st.session_state.semantic_chat_history: + if st.button("Do you want to export the analysis before clearing?"): + # Aquí puedes implementar la lógica para exportar el análisis + st.success("Analysis exported successfully") + st.session_state.semantic_chat_history = [] + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, False) + del st.session_state.graph_id + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + st.rerun() + + with col_right: + st.subheader("Visualization") + if 'key_concepts' in st.session_state and st.session_state.key_concepts: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + with tab_concept: + if 'concept_graph' in st.session_state and st.session_state.concept_graph: + st.image(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + with tab_entity: + if 'entity_graph' in st.session_state and st.session_state.entity_graph: + st.image(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_63.py b/modules/semantic/semantic_interface_63.py new file mode 100644 index 0000000000000000000000000000000000000000..c32cf8d098b8ffb30163db19deef434fb2653d50 --- /dev/null +++ b/modules/semantic/semantic_interface_63.py @@ -0,0 +1,215 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + # Barra de progreso + progress_bar = st.progress(0) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + col_left, col_right = st.columns([2, 3]) # Invertimos las proporciones + + with col_left: + st.subheader("File Selection and Chat") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + progress_bar.progress(10) + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + progress_bar.progress(30) + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + progress_bar.progress(70) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + progress_bar.progress(100) + st.success("Analysis completed successfully") + + # Crear o actualizar el grafo flotante + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph( + content="
<div>Loading graph...</div>
", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + ) + update_float_content(st.session_state.graph_id, f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + finally: + progress_bar.empty() + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + st.markdown('
<div class="chat-container">', unsafe_allow_html=True) + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + st.markdown('</div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat and Graph", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + if st.session_state.semantic_chat_history: + if st.button("Do you want to export the analysis before clearing?"): + # Aquí puedes implementar la lógica para exportar el análisis + st.success("Analysis exported successfully") + st.session_state.semantic_chat_history = [] + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, False) + del st.session_state.graph_id + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + st.rerun() + + with col_right: + st.subheader("Visualization") + if 'key_concepts' in st.session_state and st.session_state.key_concepts: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + with tab_concept: + if 'concept_graph' in st.session_state and st.session_state.concept_graph: + st.image(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + with tab_entity: + if 'entity_graph' in st.session_state and st.session_state.entity_graph: + st.image(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_64.py b/modules/semantic/semantic_interface_64.py new file mode 100644 index 0000000000000000000000000000000000000000..731678c700b81bdb8043dfa75ef875544ef44860 --- /dev/null +++ b/modules/semantic/semantic_interface_64.py @@ -0,0 +1,170 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Actualizar el grafo flotante + graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(graph_content, width="40%", height="60%", position="top-right") + else: + update_float_content(st.session_state.graph_id, graph_content) + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + st.markdown('
<div class="chat-container">', unsafe_allow_html=True) + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + st.markdown('</div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Botón para alternar la visibilidad del grafo flotante + if 'graph_id' in st.session_state: + if st.button("Toggle Graph Visibility"): + toggle_float_visibility(st.session_state.graph_id, not st.session_state.get('graph_visible', True)) + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_65.py b/modules/semantic/semantic_interface_65.py new file mode 100644 index 0000000000000000000000000000000000000000..6ea2f629e954c34ed7407e1d06241dc5040f1879 --- /dev/null +++ b/modules/semantic/semantic_interface_65.py @@ -0,0 +1,176 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Actualizar el grafo flotante + graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(graph_content, width="40%", height="auto", position="center-right") + else: + update_float_content(st.session_state.graph_id, graph_content) + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + st.markdown('
<div class="chat-container">', unsafe_allow_html=True) + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + st.markdown('</div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_66.py b/modules/semantic/semantic_interface_66.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa57fb062f09215e606e80cdbe9dfdacfcda759 --- /dev/null +++ b/modules/semantic/semantic_interface_66.py @@ -0,0 +1,186 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Actualizar el contenido del grafo + st.session_state.graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(st.session_state.graph_content, width="40%", height="auto", position="center-right") + else: + update_float_content(st.session_state.graph_id, st.session_state.graph_content) + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + chat_container = st.container() + with chat_container: + st.markdown('
<div class="chat-container">', unsafe_allow_html=True) + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + st.markdown('</div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + + # Mostrar el grafo flotante si está visible + if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state: + st.markdown( + f""" +
+ <div> + {st.session_state.graph_content} + </div>
+ """, + unsafe_allow_html=True + ) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_67.py b/modules/semantic/semantic_interface_67.py new file mode 100644 index 0000000000000000000000000000000000000000..952286e515d0b2aaded7d0e4ae21e5d4f6de8115 --- /dev/null +++ b/modules/semantic/semantic_interface_67.py @@ -0,0 +1,189 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Actualizar el contenido del grafo + st.session_state.graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(st.session_state.graph_content, width="540px", height="540px", position="center-right") + else: + update_float_content(st.session_state.graph_id, st.session_state.graph_content) + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Mostrar el historial del chat + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + + # Mostrar el grafo flotante si está visible + if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state: + st.markdown( + f""" +
+ <div> + {st.session_state.graph_content} + </div>
+ """, + unsafe_allow_html=True + ) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_68.py b/modules/semantic/semantic_interface_68.py new file mode 100644 index 0000000000000000000000000000000000000000..7d76233b4405d8e141d906c75f98c4cba2cb822e --- /dev/null +++ b/modules/semantic/semantic_interface_68.py @@ -0,0 +1,195 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph_base64, entity_graph_base64, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Aquí cambiamos el contenido del elemento flotante para mostrar un video de YouTube + youtube_video_id = "dQw4w9WgXcQ" # Cambia esto por el ID del video que quieras mostrar + video_content = f""" + + """ + st.session_state.graph_id = float_graph(video_content, width="800px", height="600px", position="center-right") + st.session_state.graph_visible = True + st.session_state.graph_content = video_content + + # Log para depuración + st.write(f"Debug: Graph ID: {st.session_state.get('graph_id')}") + st.write(f"Debug: Graph visible: {st.session_state.get('graph_visible')}") + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + + # Mostrar el historial del chat + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + st.markdown('
<div class="input-container">', unsafe_allow_html=True) + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + + # Mostrar el grafo flotante si está visible + if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state: + st.markdown( + f""" +
+ <div> + {st.session_state.graph_content} + </div>
+ """, + unsafe_allow_html=True + ) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_681.py b/modules/semantic/semantic_interface_681.py new file mode 100644 index 0000000000000000000000000000000000000000..9384c9f712a4145c14d5d43a1657e11e92cbeaea --- /dev/null +++ b/modules/semantic/semantic_interface_681.py @@ -0,0 +1,165 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + col1, col2 = st.columns([3, 1]) + with col1: + analyze_button = st.button("Analyze Document") + with col2: + toggle_graph = st.checkbox("Show Graph", value=st.session_state.graph_visible) + + if analyze_button: + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + float_graph(graph_content) + st.session_state.graph_visible = True + toggle_float_visibility(True) + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + if toggle_graph != st.session_state.graph_visible: + st.session_state.graph_visible = toggle_graph + toggle_float_visibility(toggle_graph) + + st.subheader("Chat with AI") + + # Mostrar el historial del chat + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + st.markdown('
<div class="input-container">', unsafe_allow_html=True) + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible si está activado + if st.session_state.graph_visible: + toggle_float_visibility(True) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_681_23-9-24.py b/modules/semantic/semantic_interface_681_23-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..69477b49cf6dd9be21b06e330813aa2fe274e3ec --- /dev/null +++ b/modules/semantic/semantic_interface_681_23-9-24.py @@ -0,0 +1,222 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + graph_content = f""" +

+ <div>
+ <h3>Key Concepts:</h3>
+ <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p>
+ <img src="data:image/png;base64,{concept_graph}" alt="

+ Concept Graph + """ + + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(graph_content, width="800px", height="600px", position="center-right") + st.write(f"New graph created with ID: {st.session_state.graph_id}") + else: + update_float_content(st.session_state.graph_id, graph_content) + st.write(f"Existing graph updated with ID: {st.session_state.graph_id}") + + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + + # Depuración + st.write(f"Debug: Graph ID: {st.session_state.graph_id}") + st.write(f"Debug: Graph visible: {st.session_state.graph_visible}") + st.write(f"Debug: Concept graph base64 (first 100 chars): {concept_graph[:100]}") + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + + st.subheader("Chat with AI") + + # Mostrar el historial del chat + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + + st.markdown('
<div class="input-container">', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + col1, col2, col3 = st.columns([3, 1, 1]) + + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Añadir botones para controlar el elemento flotante + col1, col2 = st.columns(2) + with col1: + if st.button("Show Graph"): + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + st.write(f"Showing graph with ID: {st.session_state.graph_id}") + else: + st.write("No graph available to show") + + with col2: + if st.button("Hide Graph"): + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, False) + st.session_state.graph_visible = False + st.write(f"Hiding graph with ID: {st.session_state.graph_id}") + else: + st.write("No graph available to hide") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_68ok copy.py b/modules/semantic/semantic_interface_68ok copy.py new file mode 100644 index 0000000000000000000000000000000000000000..fc16cf6f6c19e45753d432af4e13c32f5880841a --- /dev/null +++ b/modules/semantic/semantic_interface_68ok copy.py @@ -0,0 +1,215 @@ +import streamlit as st +import streamlit_float +import streamlit_option_menu +import streamlit_antd_components +import streamlit.components.v1 as components +import streamlit.components.v1 as stc +import logging +from .semantic_process import * +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float68ok import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph_base64, entity_graph_base64, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + if concept_graph_base64: + graph_content = f""" +

<div> + <h3>Key Concepts:</h3> + <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p> + <img src="data:image/png;base64,{concept_graph_base64}" alt="Concept Graph"/> + </div> + """ + st.session_state.graph_id = float_graph(graph_content, width="800px", height="600px", position="center-right") + st.session_state.graph_visible = True + st.session_state.graph_content = graph_content + + if entity_graph_base64: + entity_graph_content = f""" +

<div> + <h3>Entity Graph:</h3> + <img src="data:image/png;base64,{entity_graph_base64}" alt="Entity Graph"/> + </div> + """ + st.session_state.entity_graph_id = float_graph(entity_graph_content, width="800px", height="600px", position="bottom-left") + + # Debug logging + st.write(f"Debug: Graph ID: {st.session_state.get('graph_id')}") + st.write(f"Debug: Graph visible: {st.session_state.get('graph_visible')}") + st.write(f"Debug: Concept graph base64 (first 100 chars): {concept_graph_base64[:100]}") + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + + # Display the chat history + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + st.markdown('<div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + col1, col2, col3 = st.columns([3, 1, 1]) + + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + +# Asegurarse de que el grafo flotante permanezca visible después de las interacciones +if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + +# Mostrar el grafo flotante si está visible +if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state: + components.html( + f""" +
+ <div> + {st.session_state.graph_content} + </div>
+ """, + height=600, + scrolling=True + ) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_68ok.py b/modules/semantic/semantic_interface_68ok.py new file mode 100644 index 0000000000000000000000000000000000000000..8a34d56f794a81dca38b251a21fba4ca16b5a6ad --- /dev/null +++ b/modules/semantic/semantic_interface_68ok.py @@ -0,0 +1,98 @@ +import streamlit as st +import logging +from ..database.database_oldFromV2 import manage_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + + + +def display_semantic_interface(lang_code, nlp_models, t): + st.subheader(t['semantic_title']) + + text_input = st.text_area( + t['warning_message'], + height=150, + key=generate_unique_key("semantic", "text_area") + ) + + if st.button( + t['results_title'], + key=generate_unique_key("semantic", "analyze_button") + ): + if text_input: + # Aquí iría tu lógica de análisis morfosintáctico + # Por ahora, solo mostraremos un mensaje de placeholder + st.info(t['analysis_placeholder']) + else: + st.warning(t['no_text_warning']) + + +''' +def display_semantic_interface(lang_code, nlp_models, t): + st.title("Semantic Analysis") + + tab1, tab2 = st.tabs(["File Management", "Analysis"]) + + with tab1: + display_file_management(lang_code, t) + + with tab2: + # Aquí irá el código para el análisis semántico (lo implementaremos después) + st.write("Semantic analysis section will be implemented here.") + +def display_file_management(lang_code, t): + st.header("File Management") + + # File Upload Section + st.subheader("Upload New File") + uploaded_file = st.file_uploader( + "Choose a file to upload", + type=['txt', 'pdf', 'docx', 'doc', 'odt'], + key=generate_unique_key('semantic', 'file_uploader') + ) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if manage_file_contents(st.session_state.username, uploaded_file.name, file_contents, 'semantic'): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + + + # File Management Section + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + try: + logger.info(f"Attempting to delete file: {file['file_name']} for user: {st.session_state.username}") + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + logger.info(f"File {file['file_name']} deleted successfully for user: {st.session_state.username}") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + logger.error(f"Failed to delete file {file['file_name']} for user: {st.session_state.username}") + except Exception as e: + st.error(f"An error occurred while deleting file {file['file_name']}: {str(e)}") + logger.exception(f"Exception occurred while deleting file {file['file_name']} for user: {st.session_state.username}") + + else: + st.info("No files uploaded yet.") + +if __name__ == "__main__": + # This is just for testing purposes + class MockTranslation(dict): + def __getitem__(self, key): + return key + + display_semantic_interface('en', {}, MockTranslation()) + + ''' \ No newline at end of file diff 
--git a/modules/semantic/semantic_interface_68okBackUp.py b/modules/semantic/semantic_interface_68okBackUp.py new file mode 100644 index 0000000000000000000000000000000000000000..a8d8eaeafca312b1fa3d6ef2fc81bf2bf7a844ad --- /dev/null +++ b/modules/semantic/semantic_interface_68okBackUp.py @@ -0,0 +1,209 @@ +import streamlit as st +import streamlit.components.v1 as components +import streamlit.components.v1 as stc +import logging +from .semantic_process import * +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float68ok import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Initialize the chatbot and the chat history + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Initialize the graph state if it does not exist + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Actualizar el contenido del grafo + st.session_state.graph_content = f""" +

<div> + <h3>Key Concepts:</h3> + <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p> + <img src="data:image/png;base64,{concept_graph}" alt="Concept Graph"/> + </div> + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(st.session_state.graph_content, width="540px", height="540px", position="center-right") + else: + update_float_content(st.session_state.graph_id, st.session_state.graph_content) + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + + # Display the chat history + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + st.markdown('<div>
', unsafe_allow_html=True) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + col1, col2, col3 = st.columns([3, 1, 1]) + + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) + + # Mostrar el grafo flotante si está visible + if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state: + st.markdown( + f""" +
+ <div> + {st.session_state.graph_content} + </div>
+ """, + unsafe_allow_html=True + ) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_69.py b/modules/semantic/semantic_interface_69.py new file mode 100644 index 0000000000000000000000000000000000000000..9491c4a0cd7e20c82eeb3bed69d2f3417e92e1d4 --- /dev/null +++ b/modules/semantic/semantic_interface_69.py @@ -0,0 +1,167 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
<div>{t['semantic_initial_message']}</div>
", unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Semantic Analysis") + + st.subheader("File Selection and Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.session_state.current_file_contents = file_contents + st.success("Analysis completed successfully") + + # Crear o actualizar el elemento flotante con el grafo + graph_content = f""" +

<div> + <h3>Key Concepts:</h3> + <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p> + <img src="data:image/png;base64,{concept_graph}" alt="Concept Graph"/> + </div> + """ + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph(graph_content, width="540px", height="540px", position="center-right") + else: + update_float_content(st.session_state.graph_id, graph_content) + + toggle_float_visibility(st.session_state.graph_id, True) + st.session_state.graph_visible = True + + # Debug: show the graph directly in the interface + #st.image(f"data:image/png;base64,{concept_graph}", caption="Concept Graph (Debug View)", use_column_width=True) + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + st.subheader("Chat with AI") + + # Display the chat history + for message in st.session_state.semantic_chat_history: + message_class = "user-message" if message["role"] == "user" else "assistant-message" + st.markdown(f'
{message["content"]}
', unsafe_allow_html=True) + + # Place the user input and buttons at the bottom + st.markdown('<div>
', unsafe_allow_html=True) + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2, col3 = st.columns([3, 1, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + with col3: + if 'graph_id' in st.session_state: + toggle_button = st.button("Toggle Graph", key="toggle_graph") + if toggle_button: + st.session_state.graph_visible = not st.session_state.get('graph_visible', True) + toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible) + st.markdown('</div>
', unsafe_allow_html=True) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', '')) + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + # Asegurarse de que el grafo flotante permanezca visible después de las interacciones + if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False): + toggle_float_visibility(st.session_state.graph_id, True) \ No newline at end of file diff --git a/modules/semantic/semantic_interface_6_Ok-23-9-24.py b/modules/semantic/semantic_interface_6_Ok-23-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..bcefdcf92a7c7f4f828d3ca74a88cd0132c3c27a --- /dev/null +++ b/modules/semantic/semantic_interface_6_Ok-23-9-24.py @@ -0,0 +1,223 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document"): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + + # Crear el grafo flotante + if 'graph_id' not in st.session_state: + st.session_state.graph_id = float_graph( + content="
<div>Loading graph...</div>
", + width="40%", + height="60%", + position="bottom-right", + shadow=2, + transition=1 + ) + + # Actualizar el contenido del grafo flotante + update_float_content(st.session_state.graph_id, f""" +

<div> + <h3>Key Concepts:</h3> + <p>{', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}</p> + <img src="data:image/png;base64,{concept_graph}" alt="Concept Graph"/> + </div> + """) + + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + st.session_state.concept_graph = None + st.session_state.entity_graph = None + st.session_state.key_concepts = [] + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + with st.expander("Chat with AI", expanded=True): + chat_container = st.container() + + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state and st.session_state.concept_graph: + st.image(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state and st.session_state.entity_graph: + st.image(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") + + # Botón para cerrar el grafo flotante + if st.button("Close Graph", key="close_graph"): + if 'graph_id' in st.session_state: + toggle_float_visibility(st.session_state.graph_id, False) + del st.session_state.graph_id \ No newline at end of file diff --git a/modules/semantic/semantic_interface_6_StarPoint.py b/modules/semantic/semantic_interface_6_StarPoint.py new file mode 100644 index 0000000000000000000000000000000000000000..b251f023c01e4c0d042605a5a97477c2269670e9 --- /dev/null +++ b/modules/semantic/semantic_interface_6_StarPoint.py @@ -0,0 +1,196 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + with st.expander("Chat with AI", expanded=True): + chat_container = st.container() + + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + col1, col2 = st.columns([3, 1]) + + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], 
st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_7.py b/modules/semantic/semantic_interface_7.py new file mode 100644 index 0000000000000000000000000000000000000000..650182ceb40ce32885615517efeb32786009d996 --- /dev/null +++ b/modules/semantic/semantic_interface_7.py @@ -0,0 +1,201 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', 
'')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") \ No newline at end of file diff --git "a/modules/semantic/semantic_interface_Despu\303\251s.py" "b/modules/semantic/semantic_interface_Despu\303\251s.py" new file mode 100644 index 0000000000000000000000000000000000000000..2d289197b51402d9a108eacfd06668a2394fbdfe --- /dev/null +++ "b/modules/semantic/semantic_interface_Despu\303\251s.py" @@ -0,0 +1,116 @@ +import streamlit as st +import logging +from io import BytesIO +import base64 +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import ( + initialize_mongodb_connection, + initialize_database_connections, + create_admin_user, + create_student_user, + get_user, + get_student_data, + store_file_contents, + retrieve_file_contents, + get_user_files, + delete_file, + store_application_request, + store_user_feedback, + store_morphosyntax_result, + store_semantic_result, + store_discourse_analysis_result, + store_chat_history, + export_analysis_and_chat, + get_user_analysis_summary, + get_user_recents_chats, + get_user_analysis_details + ) + +from ..utils.widget_utils import generate_unique_key +from .flexible_analysis_handler import FlexibleAnalysisHandler + +semantic_float_init() +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def fig_to_base64(fig): + buf = BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + img_str = base64.b64encode(buf.getvalue()).decode() + return f'' + +def display_semantic_interface(lang_code, nlp_models, t): + st.set_page_config(layout="wide") + + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + if 'show_graph' not in st.session_state: + st.session_state.show_graph = False + if 'graph_id' not in st.session_state: + st.session_state.graph_id = None + + st.header(t['title']) + + # Opción para introducir texto + text_input = st.text_area( + t['text_input_label'], + height=150, + placeholder=t['text_input_placeholder'], + ) + + # Opción para cargar archivo + uploaded_file = st.file_uploader(t['file_uploader'], type=['txt']) + + if st.button(t['analyze_button']): + if text_input or uploaded_file is not None: + if 
uploaded_file: + text_content = uploaded_file.getvalue().decode('utf-8') + else: + text_content = text_input + + # Realizar el análisis + analysis_result = process_semantic_analysis(text_content, nlp_models[lang_code], lang_code) + + # Guardar el resultado en el estado de la sesión + st.session_state.semantic_result = analysis_result + + # Mostrar resultados + display_semantic_results(st.session_state.semantic_result, lang_code, t) + + # Guardar el resultado del análisis + if store_semantic_result(st.session_state.username, text_content, analysis_result): + st.success(t['success_message']) + else: + st.error(t['error_message']) + else: + st.warning(t['warning_message']) + + elif 'semantic_result' in st.session_state: + + # Si hay un resultado guardado, mostrarlo + display_semantic_results(st.session_state.semantic_result, lang_code, t) + + else: + st.info(t['initial_message']) # Asegúrate de que 'initial_message' esté en tus traducciones + +def display_semantic_results(result, lang_code, t): + if result is None: + st.warning(t['no_results']) # Asegúrate de que 'no_results' esté en tus traducciones + return + + # Mostrar conceptos clave + with st.expander(t['key_concepts'], expanded=True): + concept_text = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in result['key_concepts']]) + st.write(concept_text) + + # Mostrar el gráfico de relaciones conceptuales + with st.expander(t['conceptual_relations'], expanded=True): + st.pyplot(result['relations_graph']) diff --git a/modules/semantic/semantic_interface_StreamLitChat.py b/modules/semantic/semantic_interface_StreamLitChat.py new file mode 100644 index 0000000000000000000000000000000000000000..e0eb527289912cd0295833c4e93cd2e91bd3b6d2 --- /dev/null +++ b/modules/semantic/semantic_interface_StreamLitChat.py @@ -0,0 +1,157 @@ +import streamlit as st +import logging +from streamlit_chat import message +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'messages' not in st.session_state: + st.session_state.messages = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + + st.title("Semantic Analysis") + + # Crear dos columnas principales: una para el chat y otra para la visualización + chat_col, viz_col = st.columns([1, 1]) + + with chat_col: + st.subheader("Chat with AI") + + # Contenedor para los mensajes del chat + chat_container = st.container() + + # Input para el chat + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + + if user_input: + # Añadir mensaje del usuario + st.session_state.messages.append({"role": "user", "content": user_input}) + + # Generar respuesta del asistente + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + # Añadir respuesta del asistente + st.session_state.messages.append({"role": "assistant", "content": response}) + + # Mostrar mensajes en el contenedor del chat + with chat_container: + for i, msg in enumerate(st.session_state.messages): + message(msg['content'], is_user=msg['role'] == 'user', key=f"{i}_{msg['role']}") + + # Botón para limpiar el chat + if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')): + st.session_state.messages = [] + st.rerun() + + with viz_col: + st.subheader("Visualization") + + # Selector de archivo y botón de análisis + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("Select a file to analyze", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Visualización de conceptos clave + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + # Pestañas para los gráficos + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. 
Please analyze a document first.") + + # Sección de carga de archivos + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") + + # Gestión de archivos cargados + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_Test.py b/modules/semantic/semantic_interface_Test.py new file mode 100644 index 0000000000000000000000000000000000000000..435d574d8c6ff1b985807249e9a02061e0bd4a54 --- /dev/null +++ b/modules/semantic/semantic_interface_Test.py @@ -0,0 +1,22 @@ +import streamlit as st +from streamlit_float import * + +# Limpiar el caché al inicio +st.cache_data.clear() +st.cache_resource.clear() + + +# initialize float feature/capability +float_init() + +col1, col2 = st.columns(2) + +# Fix/float the whole column +col1.write("This entire column is fixed/floating") +col1.float() + +with col2: + container = st.container() + # Fix/float a single container inside + container.write("This text is in a container that is fixed") + container.float() \ No newline at end of file diff --git a/modules/semantic/semantic_interface_afterParty.py b/modules/semantic/semantic_interface_afterParty.py new file mode 100644 index 0000000000000000000000000000000000000000..2d289197b51402d9a108eacfd06668a2394fbdfe --- /dev/null +++ b/modules/semantic/semantic_interface_afterParty.py @@ -0,0 +1,116 @@ +import streamlit as st +import logging +from io import BytesIO +import base64 +from .semantic_float_reset import semantic_float_init, float_graph, toggle_float_visibility, update_float_content +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import ( + initialize_mongodb_connection, + initialize_database_connections, + create_admin_user, + create_student_user, + get_user, + get_student_data, + store_file_contents, + retrieve_file_contents, + get_user_files, + delete_file, + store_application_request, + store_user_feedback, + store_morphosyntax_result, + store_semantic_result, + store_discourse_analysis_result, + store_chat_history, + export_analysis_and_chat, + get_user_analysis_summary, + get_user_recents_chats, + get_user_analysis_details + ) + +from ..utils.widget_utils import generate_unique_key +from .flexible_analysis_handler import FlexibleAnalysisHandler + +semantic_float_init() +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + 
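+# The float helpers imported above from .semantic_float_reset are not shown in this
+# diff. A minimal sketch of their assumed shapes, inferred from the call sites in
+# these interfaces (signatures and bodies are assumptions, not the actual module):
+# def semantic_float_init(): ...  # inject the floating-container CSS/JS once per page
+# def float_graph(content, width="40%", height="60%", position="bottom-right", shadow=2, transition=1): ...  # render content in a floating div and return its element id
+# def update_float_content(graph_id, content): ...  # replace the floating div's inner HTML
+# def toggle_float_visibility(graph_id, visible): ...  # show or hide the floating div by id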
+def fig_to_base64(fig): + buf = BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + img_str = base64.b64encode(buf.getvalue()).decode() + return f'' + +def display_semantic_interface(lang_code, nlp_models, t): + st.set_page_config(layout="wide") + + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + if 'show_graph' not in st.session_state: + st.session_state.show_graph = False + if 'graph_id' not in st.session_state: + st.session_state.graph_id = None + + st.header(t['title']) + + # Opción para introducir texto + text_input = st.text_area( + t['text_input_label'], + height=150, + placeholder=t['text_input_placeholder'], + ) + + # Opción para cargar archivo + uploaded_file = st.file_uploader(t['file_uploader'], type=['txt']) + + if st.button(t['analyze_button']): + if text_input or uploaded_file is not None: + if uploaded_file: + text_content = uploaded_file.getvalue().decode('utf-8') + else: + text_content = text_input + + # Realizar el análisis + analysis_result = process_semantic_analysis(text_content, nlp_models[lang_code], lang_code) + + # Guardar el resultado en el estado de la sesión + st.session_state.semantic_result = analysis_result + + # Mostrar resultados + display_semantic_results(st.session_state.semantic_result, lang_code, t) + + # Guardar el resultado del análisis + if store_semantic_result(st.session_state.username, text_content, analysis_result): + st.success(t['success_message']) + else: + st.error(t['error_message']) + else: + st.warning(t['warning_message']) + + elif 'semantic_result' in st.session_state: + + # Si hay un resultado guardado, mostrarlo + display_semantic_results(st.session_state.semantic_result, lang_code, t) + + else: + st.info(t['initial_message']) # Asegúrate de que 'initial_message' esté en tus traducciones + +def display_semantic_results(result, lang_code, t): + if result is None: + st.warning(t['no_results']) # Asegúrate de que 'no_results' esté en tus traducciones + return + + # Mostrar conceptos clave + with st.expander(t['key_concepts'], expanded=True): + concept_text = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in result['key_concepts']]) + st.write(concept_text) + + # Mostrar el gráfico de relaciones conceptuales + with st.expander(t['conceptual_relations'], expanded=True): + st.pyplot(result['relations_graph']) diff --git a/modules/semantic/semantic_interface_backup2092024_1930 copy.py b/modules/semantic/semantic_interface_backup2092024_1930 copy.py new file mode 100644 index 0000000000000000000000000000000000000000..fab61a80830dc404e0c3d7694f93803f900061b5 --- /dev/null +++ b/modules/semantic/semantic_interface_backup2092024_1930 copy.py @@ -0,0 +1,188 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + 
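+ # The chatbot helpers imported from ..chatbot.chatbot are not shown in this diff.
+ # Assumed shapes, inferred from the call sites below (a sketch, not the confirmed API):
+ # initialize_chatbot('semantic') returns a chatbot exposing
+ # generate_response(user_input, lang_code, context='') -> str, while
+ # process_semantic_chat_input(user_input, lang_code, nlp_model, file_contents) -> str
+ # handles the '/analyze_current' command against the currently loaded file.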
st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ <div> + {t['semantic_initial_message']} + </div>
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + st.markdown('
<div>', unsafe_allow_html=True)
+        chat_history = st.session_state.get('semantic_chat_history', [])
+        for message in chat_history:
+            with st.chat_message(message["role"]):
+                st.write(message["content"])
+        st.markdown('</div>', unsafe_allow_html=True)
+
+        user_input = st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input'))
+
+        if user_input:
+            chat_history.append({"role": "user", "content": user_input})
+
+            if user_input.startswith('/analyze_current'):
+                response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', ''))
+            else:
+                response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code)
+
+            chat_history.append({"role": "assistant", "content": response})
+            st.session_state.semantic_chat_history = chat_history
+
+        if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')):
+            st.session_state.semantic_chat_history = []
+            st.rerun()
+
+    with col_graph:
+        st.subheader("Visualization")
+        st.markdown('<div>', unsafe_allow_html=True)
+        if 'key_concepts' in st.session_state:
+            st.write("Key Concepts:")
+            st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts]))
+
+        tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"])
+
+        with tab_concept:
+            if 'concept_graph' in st.session_state:
+                st.pyplot(st.session_state.concept_graph)
+            else:
+                st.info("No concept graph available. Please analyze a document first.")
+
+        with tab_entity:
+            if 'entity_graph' in st.session_state:
+                st.pyplot(st.session_state.entity_graph)
+            else:
+                st.info("No entity graph available. Please analyze a document first.")
+        st.markdown('</div>', unsafe_allow_html=True)
\ No newline at end of file
diff --git a/modules/semantic/semantic_interface_backup2092024_1930.py b/modules/semantic/semantic_interface_backup2092024_1930.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d97ce833c0da8a58ea642ca760ba50503b998a9
--- /dev/null
+++ b/modules/semantic/semantic_interface_backup2092024_1930.py
@@ -0,0 +1,192 @@
+import streamlit as st
+import logging
+from .semantic_process import process_semantic_analysis
+from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
+from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
+from ..utils.widget_utils import generate_unique_key
+from .flexible_analysis_handler import FlexibleAnalysisHandler  # Añade esta línea
+
+logger = logging.getLogger(__name__)
+
+def get_translation(t, key, default):
+    return t.get(key, default)
+
+def display_semantic_interface(lang_code, nlp_models, t):
+    # Inicializar el chatbot al principio de la función
+    if 'semantic_chatbot' not in st.session_state:
+        st.session_state.semantic_chatbot = initialize_chatbot('semantic')
+
+    st.markdown("""
+    """, unsafe_allow_html=True)
+
+    # Mostrar el mensaje inicial como un párrafo estilizado
+    st.markdown(f"""
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + analysis_result = process_semantic_analysis(file_contents, nlp_model, lang_code) + + handler = FlexibleAnalysisHandler(analysis_result) + + st.session_state.concept_graph = handler.get_concept_graph() + st.session_state.entity_graph = handler.get_entity_graph() + st.session_state.key_concepts = handler.get_key_concepts() + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + st.markdown('
<div>', unsafe_allow_html=True)
+        chat_history = st.session_state.get('semantic_chat_history', [])
+        for message in chat_history:
+            with st.chat_message(message["role"]):
+                st.write(message["content"])
+        st.markdown('</div>', unsafe_allow_html=True)
+
+        user_input = st.chat_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input'))
+
+        if user_input:
+            chat_history.append({"role": "user", "content": user_input})
+
+            if user_input.startswith('/analyze_current'):
+                response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', ''))
+            else:
+                response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code)
+
+            chat_history.append({"role": "assistant", "content": response})
+            st.session_state.semantic_chat_history = chat_history
+
+        if st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')):
+            st.session_state.semantic_chat_history = []
+            st.rerun()
+
+    with col_graph:
+        st.subheader("Visualization")
+        st.markdown('<div>', unsafe_allow_html=True)
+        if 'key_concepts' in st.session_state:
+            st.write("Key Concepts:")
+            st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts]))
+
+        tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"])
+
+        with tab_concept:
+            if 'concept_graph' in st.session_state:
+                st.pyplot(st.session_state.concept_graph)
+            else:
+                st.info("No concept graph available. Please analyze a document first.")
+
+        with tab_entity:
+            if 'entity_graph' in st.session_state:
+                st.pyplot(st.session_state.entity_graph)
+            else:
+                st.info("No entity graph available. Please analyze a document first.")
+        st.markdown('</div>', unsafe_allow_html=True)
\ No newline at end of file
diff --git a/modules/semantic/semantic_interface_backup_2092024.py b/modules/semantic/semantic_interface_backup_2092024.py
new file mode 100644
index 0000000000000000000000000000000000000000..549e15f8d5e26c1ecfbe0bff01c05f539da7a296
--- /dev/null
+++ b/modules/semantic/semantic_interface_backup_2092024.py
@@ -0,0 +1,165 @@
+import streamlit as st
+import logging
+from .semantic_process import process_semantic_analysis
+from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
+from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
+from ..utils.widget_utils import generate_unique_key
+
+logger = logging.getLogger(__name__)
+
+def get_translation(t, key, default):
+    return t.get(key, default)
+
+def display_semantic_interface(lang_code, nlp_models, t):
+    # Inicializar el chatbot al principio de la función
+    if 'semantic_chatbot' not in st.session_state:
+        st.session_state.semantic_chatbot = initialize_chatbot('semantic')
+
+    st.markdown("""
+    """, unsafe_allow_html=True)
+
+    st.markdown(f"""
+ {get_translation(t, 'semantic_initial_message', 'Welcome to the semantic analysis interface.')} +
+ """, unsafe_allow_html=True) + + # File management container + st.markdown('
<div>', unsafe_allow_html=True)
+    col1, col2, col3, col4 = st.columns(4)
+
+    with col1:
+        if st.button("Upload File", key=generate_unique_key('semantic', 'upload_button')):
+            st.session_state.show_uploader = True
+
+    with col2:
+        user_files = get_user_files(st.session_state.username, 'semantic')
+        file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files]
+        selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector'))
+
+    with col3:
+        analyze_button = st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document'))
+
+    with col4:
+        delete_button = st.button("Delete File", key=generate_unique_key('semantic', 'delete_file'))
+
+    st.markdown('</div>', unsafe_allow_html=True)
+
+    # File uploader (hidden by default)
+    if st.session_state.get('show_uploader', False):
+        uploaded_file = st.file_uploader("Choose a file", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader'))
+        if uploaded_file is not None:
+            file_contents = uploaded_file.getvalue().decode('utf-8')
+            if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents):
+                st.session_state.file_contents = file_contents
+                st.success(get_translation(t, 'file_uploaded_success', 'File uploaded and saved successfully'))
+                st.session_state.show_uploader = False  # Hide uploader after successful upload
+            else:
+                st.error(get_translation(t, 'file_upload_error', 'Error uploading file'))
+
+
+    # Contenedor para la sección de análisis
+    st.markdown('<div>', unsafe_allow_html=True)
+    col_chat, col_graph = st.columns([1, 1])
+
+    with col_chat:
+        st.subheader(get_translation(t, 'chat_title', 'Semantic Analysis Chat'))
+        chat_container = st.container()
+
+        with chat_container:
+            chat_history = st.session_state.get('semantic_chat_history', [])
+            for message in chat_history:
+                with st.chat_message(message["role"]):
+                    st.write(message["content"])
+
+            user_input = st.chat_input(get_translation(t, 'semantic_chat_input', 'Type your message here...'), key=generate_unique_key('semantic', 'chat_input'))
+
+            if user_input:
+                chat_history.append({"role": "user", "content": user_input})
+
+                if user_input.startswith('/analyze_current'):
+                    response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', ''))
+                else:
+                    response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code)
+
+                chat_history.append({"role": "assistant", "content": response})
+                st.session_state.semantic_chat_history = chat_history
+
+    with col_graph:
+        st.subheader(get_translation(t, 'graph_title', 'Semantic Graphs'))
+
+        # Mostrar conceptos clave y entidades horizontalmente
+        if 'key_concepts' in st.session_state:
+            st.write(get_translation(t, 'key_concepts_title', 'Key Concepts'))
+            st.markdown('<div>', unsafe_allow_html=True)
+            for concept, freq in st.session_state.key_concepts:
+                st.markdown(f'{concept}: {freq:.2f}', unsafe_allow_html=True)
+            st.markdown('</div>', unsafe_allow_html=True)
+
+        if 'entities' in st.session_state:
+            st.write(get_translation(t, 'entities_title', 'Entities'))
+            st.markdown('<div>', unsafe_allow_html=True)
+            for entity, type in st.session_state.entities.items():
+                st.markdown(f'{entity}: {type}', unsafe_allow_html=True)
+            st.markdown('</div>', unsafe_allow_html=True)
+
+        # Usar pestañas para mostrar los gráficos
+        tab1, tab2 = st.tabs(["Concept Graph", "Entity Graph"])
+
+        with tab1:
+            if 'concept_graph' in st.session_state:
+                st.pyplot(st.session_state.concept_graph)
+
+        with tab2:
+            if 'entity_graph' in st.session_state:
+                st.pyplot(st.session_state.entity_graph)
+
+    st.markdown('</div>', unsafe_allow_html=True)
+
+    if st.button(get_translation(t, 'clear_chat', 'Clear chat'), key=generate_unique_key('semantic', 'clear_chat')):
+        st.session_state.semantic_chat_history = []
+        st.rerun()
\ No newline at end of file
diff --git a/modules/semantic/semantic_interface_backup_2192024_1230.py b/modules/semantic/semantic_interface_backup_2192024_1230.py
new file mode 100644
index 0000000000000000000000000000000000000000..241407616ae3ce590be4cb7268b82eef2325d8a8
--- /dev/null
+++ b/modules/semantic/semantic_interface_backup_2192024_1230.py
@@ -0,0 +1,194 @@
+import streamlit as st
+import logging
+from .semantic_process import process_semantic_analysis
+from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
+from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
+from ..utils.widget_utils import generate_unique_key
+
+logger = logging.getLogger(__name__)
+
+def get_translation(t, key, default):
+    return t.get(key, default)
+
+def display_semantic_interface(lang_code, nlp_models, t):
+    # Inicializar el chatbot y el historial del chat al principio de la función
+    if 'semantic_chatbot' not in st.session_state:
+        st.session_state.semantic_chatbot = initialize_chatbot('semantic')
+
+    if 'semantic_chat_history' not in st.session_state:
+        st.session_state.semantic_chat_history = []
+
+    st.markdown("""
+    """, unsafe_allow_html=True)
+
+    # Mostrar el mensaje inicial como un párrafo estilizado
+    st.markdown(f"""
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + with st.expander("Chat with AI", expanded=True): + chat_container = st.container() + + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], 
st.session_state.get('file_contents', '')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_chatforup.py b/modules/semantic/semantic_interface_chatforup.py new file mode 100644 index 0000000000000000000000000000000000000000..47c5d8789c4fa9de0c544bad98ecc137cfc2825c --- /dev/null +++ b/modules/semantic/semantic_interface_chatforup.py @@ -0,0 +1,196 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', 
'')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_stcontainerforchat.py b/modules/semantic/semantic_interface_stcontainerforchat.py new file mode 100644 index 0000000000000000000000000000000000000000..47c5d8789c4fa9de0c544bad98ecc137cfc2825c --- /dev/null +++ b/modules/semantic/semantic_interface_stcontainerforchat.py @@ -0,0 +1,196 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key + +logger = logging.getLogger(__name__) + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicializar el chatbot y el historial del chat al principio de la función + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + st.markdown(""" + + """, unsafe_allow_html=True) + + # Mostrar el mensaje inicial como un párrafo estilizado + st.markdown(f""" +
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', 
'')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") \ No newline at end of file diff --git a/modules/semantic/semantic_interface_test610.py b/modules/semantic/semantic_interface_test610.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae439ec0086c3baa0bc74374358a81e8f865135 --- /dev/null +++ b/modules/semantic/semantic_interface_test610.py @@ -0,0 +1,212 @@ +import streamlit as st +import logging +from .semantic_process import process_semantic_analysis +from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input +from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files +from ..utils.widget_utils import generate_unique_key +#from .semantic_float import semantic_float_init, float_graph, toggle_float_visibility, update_float_content +from .semantic_float_reset import * + +logger = logging.getLogger(__name__) +semantic_float_init() + +def get_translation(t, key, default): + return t.get(key, default) + +def display_semantic_interface(lang_code, nlp_models, t): + # Inicialización del chatbot y el historial del chat + if 'semantic_chatbot' not in st.session_state: + st.session_state.semantic_chatbot = initialize_chatbot('semantic') + if 'semantic_chat_history' not in st.session_state: + st.session_state.semantic_chat_history = [] + + # Inicializar el estado del grafo si no existe + if 'graph_visible' not in st.session_state: + st.session_state.graph_visible = False + if 'graph_content' not in st.session_state: + st.session_state.graph_content = "" + + st.markdown(""" + + """, unsafe_allow_html=True) + + st.markdown(f"
{t['semantic_initial_message']}", unsafe_allow_html=True)
+
+    tab1, tab2 = st.tabs(["Upload", "Analyze"])
+
+    with tab1:
+        st.subheader("File Management")
+        uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader'))
+        if uploaded_file is not None:
+            file_contents = uploaded_file.getvalue().decode('utf-8')
+            if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents):
+                st.success(f"File {uploaded_file.name} uploaded and saved successfully")
+            else:
+                st.error("Error uploading file")
+
+        st.markdown("---")
+        st.subheader("Manage Uploaded Files")
+        user_files = get_user_files(st.session_state.username, 'semantic')
+        if user_files:
+            for file in user_files:
+                col1, col2 = st.columns([3, 1])
+                with col1:
+                    st.write(file['file_name'])
+                with col2:
+                    if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"):
+                        if delete_file(st.session_state.username, file['file_name'], 'semantic'):
+                            st.success(f"File {file['file_name']} deleted successfully")
+                            st.rerun()
+                        else:
+                            st.error(f"Error deleting file {file['file_name']}")
+        else:
+            st.info("No files uploaded yet.")
+
+    with tab2:
+        st.subheader("Semantic Analysis")
+
+        st.subheader("File Selection and Analysis")
+        user_files = get_user_files(st.session_state.username, 'semantic')
+        file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files]
+        selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector'))
+
+        if st.button("Analyze Document"):
+            if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'):
+                file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic')
+                if file_contents:
+                    with st.spinner("Analyzing..."):
+                        try:
+                            nlp_model = nlp_models[lang_code]
+                            concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code)
+                            st.session_state.concept_graph = concept_graph
+                            st.session_state.entity_graph = entity_graph
+                            st.session_state.key_concepts = key_concepts
+                            st.session_state.current_file_contents = file_contents
+                            st.success("Analysis completed successfully")
+
+                            # Depuración: Mostrar los primeros 100 caracteres del grafo
+                            logger.debug(f"Concept graph base64 (first 100 chars): {concept_graph[:100]}")
+                            st.write(f"Debug: Concept graph base64 (first 100 chars): {concept_graph[:100]}")
+
+                            # Actualizar el contenido del grafo
+                            st.session_state.graph_content = f"""
+                            <div>
+                            Key Concepts:
+                            {', '.join([f"{concept}: {freq:.2f}" for concept, freq in key_concepts])}
+                            </div>
+                            <img src="data:image/png;base64,{concept_graph}" alt="Concept Graph"/>
+                            """
+                            if 'graph_id' not in st.session_state:
+                                st.session_state.graph_id = float_graph(st.session_state.graph_content, width="540px", height="540px", position="center-right")
+                            else:
+                                update_float_content(st.session_state.graph_id, st.session_state.graph_content)
+                            toggle_float_visibility(st.session_state.graph_id, True)
+                            st.session_state.graph_visible = True
+
+                            # Depuración: Verificar si el grafo se está creando
+                            logger.debug(f"Graph ID: {st.session_state.graph_id}")
+                            logger.debug(f"Graph visible: {st.session_state.graph_visible}")
+
+                            # Mostrar el grafo directamente en la interfaz para verificación
+                            st.image(f"data:image/png;base64,{concept_graph}", caption="Concept Graph (Debug View)", use_column_width=True)
+                        except Exception as e:
+                            logger.error(f"Error during analysis: {str(e)}")
+                            st.error(f"Error during analysis: {str(e)}")
+                            st.session_state.concept_graph = None
+                            st.session_state.entity_graph = None
+                            st.session_state.key_concepts = []
+                else:
+                    st.error("Error loading file contents")
+            else:
+                st.error("Please select a file to analyze")
+
+    st.subheader("Chat with AI")
+
+    # Mostrar el historial del chat
+    for message in st.session_state.semantic_chat_history:
+        message_class = "user-message" if message["role"] == "user" else "assistant-message"
+        st.markdown(f'<div class="{message_class}">{message["content"]}</div>', unsafe_allow_html=True)
+
+    # Colocar la entrada de usuario y los botones en la parte inferior
+    st.markdown('<div>', unsafe_allow_html=True)
+    user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input'))
+    col1, col2, col3 = st.columns([3, 1, 1])
+    with col1:
+        send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message'))
+    with col2:
+        clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat'))
+    with col3:
+        if 'graph_id' in st.session_state:
+            toggle_button = st.button("Toggle Graph", key="toggle_graph")
+            if toggle_button:
+                st.session_state.graph_visible = not st.session_state.get('graph_visible', True)
+                toggle_float_visibility(st.session_state.graph_id, st.session_state.graph_visible)
+    st.markdown('</div>', unsafe_allow_html=True)
+
+    if send_button and user_input:
+        st.session_state.semantic_chat_history.append({"role": "user", "content": user_input})
+        if user_input.startswith('/analyze_current'):
+            response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('current_file_contents', ''))
+        else:
+            response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('current_file_contents', ''))
+        st.session_state.semantic_chat_history.append({"role": "assistant", "content": response})
+        st.rerun()
+
+    if clear_button:
+        st.session_state.semantic_chat_history = []
+        st.rerun()
+
+    # Asegurarse de que el grafo flotante permanezca visible después de las interacciones
+    if 'graph_id' in st.session_state and st.session_state.get('graph_visible', False):
+        toggle_float_visibility(st.session_state.graph_id, True)
+
+    # Mostrar el grafo flotante si está visible
+    if st.session_state.get('graph_visible', False) and 'graph_content' in st.session_state:
+        st.markdown(
+            f"""
+            {st.session_state.graph_content}
+            """,
+            unsafe_allow_html=True
+        )
\ No newline at end of file
diff --git a/modules/semantic/semantic_interface_vOk.py b/modules/semantic/semantic_interface_vOk.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b2167adff34762e28fbd9ee65c64dd371ef713c
--- /dev/null
+++ b/modules/semantic/semantic_interface_vOk.py
@@ -0,0 +1,196 @@
+import streamlit as st
+import logging
+from .semantic_process import process_semantic_analysis
+from ..chatbot.chatbot import initialize_chatbot, process_semantic_chat_input
+from ..database.database_oldFromV2 import store_file_semantic_contents, retrieve_file_contents, delete_file, get_user_files
+from ..utils.widget_utils import generate_unique_key
+
+logger = logging.getLogger(__name__)
+
+def get_translation(t, key, default):
+    return t.get(key, default)
+
+def display_semantic_interface(lang_code, nlp_models, t):
+    # Inicializar el chatbot y el historial del chat al principio de la función
+    if 'semantic_chatbot' not in st.session_state:
+        st.session_state.semantic_chatbot = initialize_chatbot('semantic')
+
+    if 'semantic_chat_history' not in st.session_state:
+        st.session_state.semantic_chat_history = []
+
+    st.markdown("""
+    """, unsafe_allow_html=True)
+
+    # Mostrar el mensaje inicial como un párrafo estilizado
+    st.markdown(f"""
+ {t['semantic_initial_message']} +
+ """, unsafe_allow_html=True) + + tab1, tab2 = st.tabs(["Upload", "Analyze"]) + + with tab1: + st.subheader("File Management") + uploaded_file = st.file_uploader("Choose a file to upload", type=['txt', 'pdf', 'docx', 'doc', 'odt'], key=generate_unique_key('semantic', 'file_uploader')) + if uploaded_file is not None: + file_contents = uploaded_file.getvalue().decode('utf-8') + if store_file_semantic_contents(st.session_state.username, uploaded_file.name, file_contents): + st.success(f"File {uploaded_file.name} uploaded and saved successfully") + else: + st.error("Error uploading file") + + st.markdown("---") # Línea separadora + + st.subheader("Manage Uploaded Files") + user_files = get_user_files(st.session_state.username, 'semantic') + if user_files: + for file in user_files: + col1, col2 = st.columns([3, 1]) + with col1: + st.write(file['file_name']) + with col2: + if st.button("Delete", key=f"delete_{file['file_name']}", help=f"Delete {file['file_name']}"): + if delete_file(st.session_state.username, file['file_name'], 'semantic'): + st.success(f"File {file['file_name']} deleted successfully") + st.rerun() + else: + st.error(f"Error deleting file {file['file_name']}") + else: + st.info("No files uploaded yet.") + + with tab2: + st.subheader("Select File for Analysis") + user_files = get_user_files(st.session_state.username, 'semantic') + file_options = [get_translation(t, 'select_saved_file', 'Select a saved file')] + [file['file_name'] for file in user_files] + selected_file = st.selectbox("", options=file_options, key=generate_unique_key('semantic', 'file_selector')) + + if st.button("Analyze Document", key=generate_unique_key('semantic', 'analyze_document')): + if selected_file and selected_file != get_translation(t, 'select_saved_file', 'Select a saved file'): + file_contents = retrieve_file_contents(st.session_state.username, selected_file, 'semantic') + if file_contents: + st.session_state.file_contents = file_contents + with st.spinner("Analyzing..."): + try: + nlp_model = nlp_models[lang_code] + concept_graph, entity_graph, key_concepts = process_semantic_analysis(file_contents, nlp_model, lang_code) + st.session_state.concept_graph = concept_graph + st.session_state.entity_graph = entity_graph + st.session_state.key_concepts = key_concepts + st.success("Analysis completed successfully") + except Exception as e: + logger.error(f"Error during analysis: {str(e)}") + st.error(f"Error during analysis: {str(e)}") + else: + st.error("Error loading file contents") + else: + st.error("Please select a file to analyze") + + # Chat and Visualization + with st.container(): + col_chat, col_graph = st.columns([1, 1]) + + with col_chat: + st.subheader("Chat with AI") + + chat_container = st.container() + with chat_container: + for message in st.session_state.semantic_chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + user_input = st.text_input("Type your message here...", key=generate_unique_key('semantic', 'chat_input')) + col1, col2 = st.columns([3, 1]) + with col1: + send_button = st.button("Send", key=generate_unique_key('semantic', 'send_message')) + with col2: + clear_button = st.button("Clear Chat", key=generate_unique_key('semantic', 'clear_chat')) + + if send_button and user_input: + st.session_state.semantic_chat_history.append({"role": "user", "content": user_input}) + + if user_input.startswith('/analyze_current'): + response = process_semantic_chat_input(user_input, lang_code, nlp_models[lang_code], st.session_state.get('file_contents', 
'')) + else: + response = st.session_state.semantic_chatbot.generate_response(user_input, lang_code, context=st.session_state.get('file_contents', '')) + + st.session_state.semantic_chat_history.append({"role": "assistant", "content": response}) + st.rerun() + + if clear_button: + st.session_state.semantic_chat_history = [] + st.rerun() + + with col_graph: + st.subheader("Visualization") + if 'key_concepts' in st.session_state: + st.write("Key Concepts:") + st.write(', '.join([f"{concept}: {freq:.2f}" for concept, freq in st.session_state.key_concepts])) + + tab_concept, tab_entity = st.tabs(["Concept Graph", "Entity Graph"]) + + with tab_concept: + if 'concept_graph' in st.session_state: + st.pyplot(st.session_state.concept_graph) + else: + st.info("No concept graph available. Please analyze a document first.") + + with tab_entity: + if 'entity_graph' in st.session_state: + st.pyplot(st.session_state.entity_graph) + else: + st.info("No entity graph available. Please analyze a document first.") diff --git a/modules/semantic/semantic_process.py b/modules/semantic/semantic_process.py new file mode 100644 index 0000000000000000000000000000000000000000..0245b0dfc24145b2c34195057173298942d7584b --- /dev/null +++ b/modules/semantic/semantic_process.py @@ -0,0 +1,51 @@ +import logging +import io +import base64 +import matplotlib.pyplot as plt +from ..text_analysis.semantic_analysis import perform_semantic_analysis +from .flexible_analysis_handler import FlexibleAnalysisHandler + +logger = logging.getLogger(__name__) + +def encode_image_to_base64(image_data): + return base64.b64encode(image_data).decode('utf-8') + +def process_semantic_analysis(file_contents, nlp_model, lang_code): + logger.info(f"Starting semantic analysis processing for language: {lang_code}") + try: + result = perform_semantic_analysis(file_contents, nlp_model, lang_code) + + concept_graph = result['concept_graph'] + entity_graph = result['entity_graph'] + key_concepts = result['key_concepts'] + + # Convertir los gráficos a base64 + concept_graph_base64 = encode_image_to_base64(concept_graph) + entity_graph_base64 = encode_image_to_base64(entity_graph) + + logger.info("Semantic analysis processing completed successfully") + logger.debug(f"Concept graph base64 (first 100 chars): {concept_graph_base64[:100]}") + return concept_graph_base64, entity_graph_base64, key_concepts + except Exception as e: + logger.error(f"Error in semantic analysis processing: {str(e)}") + return None, None, [] + +''' +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.DEBUG) + +def process_semantic_analysis(file_contents, nlp_model, lang_code): + logger.info(f"Starting semantic analysis for language: {lang_code}") + try: + logger.debug("Calling perform_semantic_analysis") + result = perform_semantic_analysis(file_contents, nlp_model, lang_code) + logger.debug(f"Result keys: {result.keys()}") + logger.debug(f"Type of concept_graph: {type(result['concept_graph'])}") + logger.debug(f"Type of entity_graph: {type(result['entity_graph'])}") + logger.debug(f"Number of key_concepts: {len(result['key_concepts'])}") + logger.info("Semantic analysis completed successfully") + return result['concept_graph'], result['entity_graph'], result['key_concepts'] + except Exception as e: + logger.error(f"Error in semantic analysis: {str(e)}") + raise +''' \ No newline at end of file diff --git a/modules/semantic/semantic_process_23-9-24.py b/modules/semantic/semantic_process_23-9-24.py new file mode 100644 index 
0000000000000000000000000000000000000000..6f3a7adb62c8f15ccd4616fd3e4b20beddf33be3 --- /dev/null +++ b/modules/semantic/semantic_process_23-9-24.py @@ -0,0 +1,62 @@ +import logging +import io +import base64 +import matplotlib.pyplot as plt +from ..text_analysis.semantic_analysis import perform_semantic_analysis +from .flexible_analysis_handler import FlexibleAnalysisHandler + +logger = logging.getLogger(__name__) + +def encode_image_to_base64(image_data): + if isinstance(image_data, str): # Si es una ruta de archivo + with open(image_data, "rb") as image_file: + encoded_string = base64.b64encode(image_file.read()).decode("utf-8") + elif isinstance(image_data, bytes): # Si son datos de imagen en memoria + encoded_string = base64.b64encode(image_data).decode("utf-8") + else: + raise ValueError("Invalid image data type. Expected string (file path) or bytes.") + return encoded_string # + +def process_semantic_analysis(file_contents, nlp_model, lang_code): + logger.info(f"Starting semantic analysis processing for language: {lang_code}") + try: + result = perform_semantic_analysis(file_contents, nlp_model, lang_code) + #handler = FlexibleAnalysisHandler(result) + + #concept_graph = handler.get_graph('concept_graph') + #entity_graph = handler.get_graph('entity_graph') + #key_concepts = handler.get_key_concepts() + + concept_graph = result['concept_graph'] + entity_graph = result['entity_graph'] + key_concepts = result['key_concepts'] + + # Convertir los gráficos a base64 + concept_graph_base64 = fig_to_base64(concept_graph) if concept_graph else None + entity_graph_base64 = fig_to_base64(entity_graph) if entity_graph else None + + logger.info("Semantic analysis processing completed successfully") + return concept_graph_base64, entity_graph_base64, key_concepts + except Exception as e: + logger.error(f"Error in semantic analysis processing: {str(e)}") + return None, None, [] # Retorna valores vacíos en caso de error + +''' +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.DEBUG) + +def process_semantic_analysis(file_contents, nlp_model, lang_code): + logger.info(f"Starting semantic analysis for language: {lang_code}") + try: + logger.debug("Calling perform_semantic_analysis") + result = perform_semantic_analysis(file_contents, nlp_model, lang_code) + logger.debug(f"Result keys: {result.keys()}") + logger.debug(f"Type of concept_graph: {type(result['concept_graph'])}") + logger.debug(f"Type of entity_graph: {type(result['entity_graph'])}") + logger.debug(f"Number of key_concepts: {len(result['key_concepts'])}") + logger.info("Semantic analysis completed successfully") + return result['concept_graph'], result['entity_graph'], result['key_concepts'] + except Exception as e: + logger.error(f"Error in semantic analysis: {str(e)}") + raise +''' \ No newline at end of file diff --git a/modules/studentact/__pycache__/student_activities.cpython-311.pyc b/modules/studentact/__pycache__/student_activities.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e2b820fda86da0621440ebd0d0aabd60e9e259d Binary files /dev/null and b/modules/studentact/__pycache__/student_activities.cpython-311.pyc differ diff --git a/modules/studentact/__pycache__/student_activities_v2.cpython-311.pyc b/modules/studentact/__pycache__/student_activities_v2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b2ea707d57df5d3e7a4171dd1065ddf0c25bdd9 Binary files /dev/null and b/modules/studentact/__pycache__/student_activities_v2.cpython-311.pyc 
differ diff --git a/modules/studentact/student_activities.py b/modules/studentact/student_activities.py new file mode 100644 index 0000000000000000000000000000000000000000..1103fcd6e3d21d097845c1be5b161ecb6431d967 --- /dev/null +++ b/modules/studentact/student_activities.py @@ -0,0 +1,105 @@ +import streamlit as st +import pandas as pd +import matplotlib.pyplot as plt +import seaborn as sns +import base64 +from io import BytesIO +from reportlab.pdfgen import canvas +from reportlab.lib.pagesizes import letter +from docx import Document +from odf.opendocument import OpenDocumentText +from odf.text import P +from datetime import datetime, timedelta +import pytz +import logging + +# Configuración de logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Importaciones locales +try: + from ..database.morphosintax_mongo_db import get_student_morphosyntax_data + from ..database.chat_db import get_chat_history + logger.info("Importaciones locales exitosas") +except ImportError as e: + logger.error(f"Error en las importaciones locales: {e}") + +def display_student_progress(username, lang_code, t): + logger.debug(f"Iniciando display_student_progress para {username}") + + st.title(f"{t.get('progress_of', 'Progreso de')} {username}") + + # Obtener los datos del estudiante + student_data = get_student_morphosyntax_data(username) + + if not student_data or len(student_data.get('entries', [])) == 0: + logger.warning(f"No se encontraron datos para el estudiante {username}") + st.warning(t.get("no_data_warning", "No se encontraron datos para este estudiante.")) + st.info(t.get("try_analysis", "Intenta realizar algunos análisis de texto primero.")) + return + + logger.debug(f"Datos del estudiante obtenidos: {len(student_data['entries'])} entradas") + + # Resumen de actividades + with st.expander(t.get("activities_summary", "Resumen de Actividades"), expanded=True): + total_entries = len(student_data['entries']) + st.write(f"{t.get('total_analyses', 'Total de análisis realizados')}: {total_entries}") + + # Gráfico de tipos de análisis + try: + analysis_types = [entry.get('analysis_type', 'unknown') for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + fig, ax = plt.subplots() + sns.barplot(x=analysis_counts.index, y=analysis_counts.values, ax=ax) + ax.set_title(t.get("analysis_types_chart", "Tipos de análisis realizados")) + ax.set_xlabel(t.get("analysis_type", "Tipo de análisis")) + ax.set_ylabel(t.get("count", "Cantidad")) + st.pyplot(fig) + except Exception as e: + logger.error(f"Error al crear el gráfico: {e}") + st.error("No se pudo crear el gráfico de tipos de análisis.") + + # Función para generar el contenido del archivo de actividades de las últimas 48 horas + def generate_activity_content_48h(): + content = f"Actividades de {username} en las últimas 48 horas\n\n" + + two_days_ago = datetime.now(pytz.utc) - timedelta(days=2) + + try: + morphosyntax_analyses = get_student_morphosyntax_data(username) + recent_morphosyntax = [a for a in morphosyntax_analyses if datetime.fromisoformat(a['timestamp']) > two_days_ago] + + content += f"Análisis morfosintácticos: {len(recent_morphosyntax)}\n" + for analysis in recent_morphosyntax: + content += f"- Análisis del {analysis['timestamp']}: {analysis['text'][:50]}...\n" + + chat_history = get_chat_history(username, None) + recent_chats = [c for c in chat_history if datetime.fromisoformat(c['timestamp']) > two_days_ago] + + content += f"\nConversaciones de chat: 
{len(recent_chats)}\n" + for chat in recent_chats: + content += f"- Chat del {chat['timestamp']}: {len(chat['messages'])} mensajes\n" + except Exception as e: + logger.error(f"Error al generar el contenido de actividades: {e}") + content += "Error al recuperar los datos de actividades.\n" + + return content + + # Botones para descargar el histórico de actividades de las últimas 48 horas + st.subheader(t.get("download_history_48h", "Descargar Histórico de Actividades (Últimas 48 horas)")) + if st.button("Generar reporte de 48 horas"): + try: + report_content = generate_activity_content_48h() + st.text_area("Reporte de 48 horas", report_content, height=300) + st.download_button( + label="Descargar TXT (48h)", + data=report_content, + file_name="actividades_48h.txt", + mime="text/plain" + ) + except Exception as e: + logger.error(f"Error al generar el reporte: {e}") + st.error("No se pudo generar el reporte. Por favor, verifica los logs para más detalles.") + + logger.debug("Finalizando display_student_progress") \ No newline at end of file diff --git a/modules/studentact/student_activities_v2.py b/modules/studentact/student_activities_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2b3eb6913e28d7ad6e0eb71026980019f970202 --- /dev/null +++ b/modules/studentact/student_activities_v2.py @@ -0,0 +1,668 @@ +############## +#########student_activities.py +import streamlit as st +import re +import io +from io import BytesIO +import pandas as pd +import numpy as np +import time +import matplotlib.pyplot as plt +from datetime import datetime +from spacy import displacy +import random +import base64 +import seaborn as sns +import logging + +logger = logging.getLogger(__name__) + +################################################################################### + +def display_student_progress(username, lang_code, t, student_data): + if not student_data or len(student_data['entries']) == 0: + st.warning(t.get("no_data_warning", "No se encontraron datos para este estudiante.")) + st.info(t.get("try_analysis", "Intenta realizar algunos análisis de texto primero.")) + return + + st.title(f"{t.get('progress_of', 'Progreso de')} {username}") + + with st.expander(t.get("activities_summary", "Resumen de Actividades y Progreso"), expanded=True): + total_entries = len(student_data['entries']) + st.write(f"{t.get('total_analyses', 'Total de análisis realizados')}: {total_entries}") + + # Gráfico de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + + fig, ax = plt.subplots(figsize=(8, 4)) + analysis_counts.plot(kind='bar', ax=ax) + ax.set_title(t.get("analysis_types_chart", "Tipos de análisis realizados")) + ax.set_xlabel(t.get("analysis_type", "Tipo de análisis")) + ax.set_ylabel(t.get("count", "Cantidad")) + st.pyplot(fig) + + # Histórico de Análisis Morfosintácticos + with st.expander(t.get("morphosyntax_history", "Histórico de Análisis Morfosintácticos")): + morphosyntax_entries = [entry for entry in username['entries'] if entry['analysis_type'] == 'morphosyntax'] + if not morphosyntax_entries: + st.warning("No se encontraron análisis morfosintácticos.") + for entry in morphosyntax_entries: + st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}") + if 'arc_diagrams' in entry and entry['arc_diagrams']: + try: + st.write(entry['arc_diagrams'][0], unsafe_allow_html=True) + except Exception as e: + logger.error(f"Error al mostrar diagrama de arco: 
                except Exception as e:
                    logger.error(f"Error displaying the arc diagram: {str(e)}")
                    st.error("Error al mostrar el diagrama de arco.")
            else:
                st.write(t.get("no_arc_diagram", "No se encontró diagrama de arco para este análisis."))

    # Semantic analysis history
    with st.expander(t.get("semantic_history", "Histórico de Análisis Semánticos")):
        semantic_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'semantic']
        if not semantic_entries:
            st.warning("No se encontraron análisis semánticos.")
        for entry in semantic_entries:
            st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}")
            if 'key_concepts' in entry:
                st.write(t.get("key_concepts", "Conceptos clave:"))
                concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts']])
                st.markdown(f"{concepts_str}", unsafe_allow_html=True)
            if 'graph' in entry:
                try:
                    img_bytes = base64.b64decode(entry['graph'])
                    st.image(img_bytes, caption=t.get("conceptual_relations_graph", "Gráfico de relaciones conceptuales"))
                except Exception as e:
                    logger.error(f"Error displaying the semantic graph: {str(e)}")
                    st.error(t.get("graph_display_error", f"No se pudo mostrar el gráfico: {str(e)}"))

    # Discourse analysis history
    with st.expander(t.get("discourse_history", "Histórico de Análisis Discursivos")):
        discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse']
        for entry in discourse_entries:
            st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}")
            for i in [1, 2]:
                if f'key_concepts{i}' in entry:
                    st.write(f"{t.get('key_concepts', 'Conceptos clave')} {t.get('document', 'documento')} {i}:")
                    concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry[f'key_concepts{i}']])
                    st.markdown(f"{concepts_str}", unsafe_allow_html=True)
            try:
                if 'combined_graph' in entry and entry['combined_graph']:
                    img_bytes = base64.b64decode(entry['combined_graph'])
                    st.image(img_bytes, caption=t.get("combined_graph", "Gráfico combinado"))
                elif 'graph1' in entry and 'graph2' in entry:
                    col1, col2 = st.columns(2)
                    with col1:
                        if entry['graph1']:
                            img_bytes1 = base64.b64decode(entry['graph1'])
                            st.image(img_bytes1, caption=t.get("graph_doc1", "Gráfico documento 1"))
                    with col2:
                        if entry['graph2']:
                            img_bytes2 = base64.b64decode(entry['graph2'])
                            st.image(img_bytes2, caption=t.get("graph_doc2", "Gráfico documento 2"))
            except Exception as e:
                st.error(t.get("graph_display_error", f"No se pudieron mostrar los gráficos: {str(e)}"))

    # ChatBot conversation history
    with st.expander(t.get("chatbot_history", "Histórico de Conversaciones con el ChatBot")):
        if 'chat_history' in student_data and student_data['chat_history']:
            for i, chat in enumerate(student_data['chat_history']):
                st.subheader(f"{t.get('conversation', 'Conversación')} {i+1} - {chat['timestamp']}")
                for message in chat['messages']:
                    if message['role'] == 'user':
                        st.write(f"{t.get('user', 'Usuario')}: {message['content']}")
                    else:
                        st.write(f"{t.get('assistant', 'Asistente')}: {message['content']}")
                st.write("---")
        else:
            st.write(t.get("no_chat_history", "No se encontraron conversaciones con el ChatBot."))

    # Debugging aids
    if st.checkbox(t.get("show_debug_data", "Mostrar datos de depuración")):
        st.write(t.get("student_debug_data", "Datos del estudiante (para depuración):"))
        st.json(student_data)

    # Show the count per analysis type (a_type avoids shadowing the translations dict t)
    analysis_types = [entry['analysis_type'] for entry in student_data['entries']]
    type_counts = {a_type: analysis_types.count(a_type) for a_type in set(analysis_types)}
    st.write("Conteo de tipos de análisis:")
    st.write(type_counts)




'''
##########versión 25-9-2024---02:30 ################ OK (username)####################

def display_student_progress(username, lang_code, t, student_data):
    st.title(f"{t.get('progress_of', 'Progreso de')} {username}")

    if not student_data or len(student_data.get('entries', [])) == 0:
        st.warning(t.get("no_data_warning", "No se encontraron datos para este estudiante."))
        st.info(t.get("try_analysis", "Intenta realizar algunos análisis de texto primero."))
        return

    with st.expander(t.get("activities_summary", "Resumen de Actividades"), expanded=True):
        total_entries = len(student_data['entries'])
        st.write(f"{t.get('total_analyses', 'Total de análisis realizados')}: {total_entries}")

        # Gráfico de tipos de análisis
        analysis_types = [entry['analysis_type'] for entry in student_data['entries']]
        analysis_counts = pd.Series(analysis_types).value_counts()
        fig, ax = plt.subplots()
        analysis_counts.plot(kind='bar', ax=ax)
        ax.set_title(t.get("analysis_types_chart", "Tipos de análisis realizados"))
        ax.set_xlabel(t.get("analysis_type", "Tipo de análisis"))
        ax.set_ylabel(t.get("count", "Cantidad"))
        st.pyplot(fig)

    # Mostrar los últimos análisis morfosintácticos
    with st.expander(t.get("morphosyntax_history", "Histórico de Análisis Morfosintácticos")):
        morphosyntax_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'morphosyntax']
        for entry in morphosyntax_entries[:5]:  # Mostrar los últimos 5
            st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}")
            if 'arc_diagrams' in entry and entry['arc_diagrams']:
                st.components.v1.html(entry['arc_diagrams'][0],
height=300, scrolling=True) + + # Añadir secciones similares para análisis semánticos y discursivos si es necesario + + # Mostrar el historial de chat + with st.expander(t.get("chat_history", "Historial de Chat")): + if 'chat_history' in student_data: + for chat in student_data['chat_history'][:5]: # Mostrar las últimas 5 conversaciones + st.subheader(f"{t.get('chat_from', 'Chat del')} {chat['timestamp']}") + for message in chat['messages']: + st.write(f"{message['role'].capitalize()}: {message['content']}") + st.write("---") + else: + st.write(t.get("no_chat_history", "No hay historial de chat disponible.")) + + +##########versión 24-9-2024---17:30 ################ OK FROM--V2 de def get_student_data(username)#################### + +def display_student_progress(username, lang_code, t, student_data): + if not student_data or len(student_data['entries']) == 0: + st.warning(t.get("no_data_warning", "No se encontraron datos para este estudiante.")) + st.info(t.get("try_analysis", "Intenta realizar algunos análisis de texto primero.")) + return + + st.title(f"{t.get('progress_of', 'Progreso de')} {username}") + + with st.expander(t.get("activities_summary", "Resumen de Actividades y Progreso"), expanded=True): + total_entries = len(student_data['entries']) + st.write(f"{t.get('total_analyses', 'Total de análisis realizados')}: {total_entries}") + + # Gráfico de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + + fig, ax = plt.subplots(figsize=(8, 4)) + analysis_counts.plot(kind='bar', ax=ax) + ax.set_title(t.get("analysis_types_chart", "Tipos de análisis realizados")) + ax.set_xlabel(t.get("analysis_type", "Tipo de análisis")) + ax.set_ylabel(t.get("count", "Cantidad")) + st.pyplot(fig) + + # Histórico de Análisis Morfosintácticos + with st.expander(t.get("morphosyntax_history", "Histórico de Análisis Morfosintácticos")): + morphosyntax_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'morphosyntax'] + if not morphosyntax_entries: + st.warning("No se encontraron análisis morfosintácticos.") + for entry in morphosyntax_entries: + st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}") + if 'arc_diagrams' in entry and entry['arc_diagrams']: + try: + st.write(entry['arc_diagrams'][0], unsafe_allow_html=True) + except Exception as e: + logger.error(f"Error al mostrar diagrama de arco: {str(e)}") + st.error("Error al mostrar el diagrama de arco.") + else: + st.write(t.get("no_arc_diagram", "No se encontró diagrama de arco para este análisis.")) + + # Histórico de Análisis Semánticos + with st.expander(t.get("semantic_history", "Histórico de Análisis Semánticos")): + semantic_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'semantic'] + if not semantic_entries: + st.warning("No se encontraron análisis semánticos.") + for entry in semantic_entries: + st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}") + if 'key_concepts' in entry: + st.write(t.get("key_concepts", "Conceptos clave:")) + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts']]) + st.markdown(f"
{concepts_str}
", unsafe_allow_html=True) + if 'graph' in entry: + try: + img_bytes = base64.b64decode(entry['graph']) + st.image(img_bytes, caption=t.get("conceptual_relations_graph", "Gráfico de relaciones conceptuales")) + except Exception as e: + logger.error(f"Error al mostrar gráfico semántico: {str(e)}") + st.error(t.get("graph_display_error", f"No se pudo mostrar el gráfico: {str(e)}")) + + # Histórico de Análisis Discursivos + with st.expander(t.get("discourse_history", "Histórico de Análisis Discursivos")): + discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for entry in discourse_entries: + st.subheader(f"{t.get('analysis_of', 'Análisis del')} {entry['timestamp']}") + for i in [1, 2]: + if f'key_concepts{i}' in entry: + st.write(f"{t.get('key_concepts', 'Conceptos clave')} {t.get('document', 'documento')} {i}:") + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry[f'key_concepts{i}']]) + st.markdown(f"
{concepts_str}
", unsafe_allow_html=True) + try: + if 'combined_graph' in entry and entry['combined_graph']: + img_bytes = base64.b64decode(entry['combined_graph']) + st.image(img_bytes, caption=t.get("combined_graph", "Gráfico combinado")) + elif 'graph1' in entry and 'graph2' in entry: + col1, col2 = st.columns(2) + with col1: + if entry['graph1']: + img_bytes1 = base64.b64decode(entry['graph1']) + st.image(img_bytes1, caption=t.get("graph_doc1", "Gráfico documento 1")) + with col2: + if entry['graph2']: + img_bytes2 = base64.b64decode(entry['graph2']) + st.image(img_bytes2, caption=t.get("graph_doc2", "Gráfico documento 2")) + except Exception as e: + st.error(t.get("graph_display_error", f"No se pudieron mostrar los gráficos: {str(e)}")) + + # Histórico de Conversaciones con el ChatBot + with st.expander(t.get("chatbot_history", "Histórico de Conversaciones con el ChatBot")): + if 'chat_history' in student_data and student_data['chat_history']: + for i, chat in enumerate(student_data['chat_history']): + st.subheader(f"{t.get('conversation', 'Conversación')} {i+1} - {chat['timestamp']}") + for message in chat['messages']: + if message['role'] == 'user': + st.write(f"{t.get('user', 'Usuario')}: {message['content']}") + else: + st.write(f"{t.get('assistant', 'Asistente')}: {message['content']}") + st.write("---") + else: + st.write(t.get("no_chat_history", "No se encontraron conversaciones con el ChatBot.")) + + # Añadir logs para depuración + if st.checkbox(t.get("show_debug_data", "Mostrar datos de depuración")): + st.write(t.get("student_debug_data", "Datos del estudiante (para depuración):")) + st.json(student_data) + + # Mostrar conteo de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + type_counts = {t: analysis_types.count(t) for t in set(analysis_types)} + st.write("Conteo de tipos de análisis:") + st.write(type_counts) + + +#############################--- Update 16:00 24-9 ######################################### +def display_student_progress(username, lang_code, t, student_data): + try: + st.subheader(t.get('student_activities', 'Student Activitie')) + + if not student_data or all(len(student_data.get(key, [])) == 0 for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']): + st.warning(t.get('no_data_warning', 'No analysis data found for this student.')) + return + + # Resumen de actividades + total_analyses = sum(len(student_data.get(key, [])) for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']) + st.write(f"{t.get('total_analyses', 'Total analyses performed')}: {total_analyses}") + + # Gráfico de tipos de análisis + analysis_counts = { + t.get('morpho_analyses', 'Morphosyntactic Analyses'): len(student_data.get('morphosyntax_analyses', [])), + t.get('semantic_analyses', 'Semantic Analyses'): len(student_data.get('semantic_analyses', [])), + t.get('discourse_analyses', 'Discourse Analyses'): len(student_data.get('discourse_analyses', [])) + } + # Configurar el estilo de seaborn para un aspecto más atractivo + sns.set_style("whitegrid") + + # Crear una figura más pequeña + fig, ax = plt.subplots(figsize=(6, 4)) + + # Usar colores más atractivos + colors = ['#ff9999', '#66b3ff', '#99ff99'] + + # Crear el gráfico de barras + bars = ax.bar(analysis_counts.keys(), analysis_counts.values(), color=colors) + + # Añadir etiquetas de valor encima de cada barra + for bar in bars: + height = bar.get_height() + ax.text(bar.get_x() + bar.get_width()/2., height, + f'{height}', + ha='center', 
va='bottom') + + # Configurar el título y las etiquetas + ax.set_title(t.get('analysis_types_chart', 'Types of analyses performed'), fontsize=12) + ax.set_ylabel(t.get('count', 'Count'), fontsize=10) + + # Rotar las etiquetas del eje x para mejor legibilidad + plt.xticks(rotation=45, ha='right') + + # Ajustar el diseño para que todo quepa + plt.tight_layout() + + # Mostrar el gráfico en Streamlit + st.pyplot(fig) + + # Mostrar los últimos análisis + for analysis_type in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']: + with st.expander(t.get(f'{analysis_type}_expander', f'{analysis_type.capitalize()} History')): + for analysis in student_data.get(analysis_type, [])[:5]: # Mostrar los últimos 5 + st.subheader(f"{t.get('analysis_from', 'Analysis from')} {analysis.get('timestamp', 'N/A')}") + if analysis_type == 'morphosyntax_analyses': + if 'arc_diagrams' in analysis: + st.write(analysis['arc_diagrams'][0], unsafe_allow_html=True) + elif analysis_type == 'semantic_analyses': + if 'key_concepts' in analysis: + st.write(t.get('key_concepts', 'Key concepts')) + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis['key_concepts']])) + if 'graph' in analysis: + st.image(base64.b64decode(analysis['graph'])) + elif analysis_type == 'discourse_analyses': + for i in [1, 2]: + if f'key_concepts{i}' in analysis: + st.write(f"{t.get('key_concepts', 'Key concepts')} {t.get('document', 'Document')} {i}") + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis[f'key_concepts{i}']])) + if 'combined_graph' in analysis: + st.image(base64.b64decode(analysis['combined_graph'])) + + # Mostrar el historial de chat + with st.expander(t.get('chat_history_expander', 'Chat History')): + for chat in student_data.get('chat_history', [])[:5]: # Mostrar las últimas 5 conversaciones + st.subheader(f"{t.get('chat_from', 'Chat from')} {chat.get('timestamp', 'N/A')}") + for message in chat.get('messages', []): + st.write(f"{message.get('role', 'Unknown').capitalize()}: {message.get('content', 'No content')}") + st.write("---") + + except Exception as e: + logger.error(f"Error in display_student_progress: {str(e)}", exc_info=True) + st.error(t.get('error_loading_progress', 'Error loading student progress. 
Please try again later.')) + + + + + + + + + + + + + + + + + + + + + + + + + + + +##################################################################### +def display_student_progress(username, lang_code, t, student_data): + st.subheader(t['student_progress']) + + if not student_data or all(len(student_data[key]) == 0 for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']): + st.warning(t['no_data_warning']) + return + + # Resumen de actividades + total_analyses = sum(len(student_data[key]) for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']) + st.write(f"{t['total_analyses']}: {total_analyses}") + + # Gráfico de tipos de análisis + analysis_counts = { + t['morpho_analyses']: len(student_data['morphosyntax_analyses']), + t['semantic_analyses']: len(student_data['semantic_analyses']), + t['discourse_analyses']: len(student_data['discourse_analyses']) + } + fig, ax = plt.subplots() + ax.bar(analysis_counts.keys(), analysis_counts.values()) + ax.set_title(t['analysis_types_chart']) + st.pyplot(fig) + + # Mostrar los últimos análisis + for analysis_type in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']: + with st.expander(t[f'{analysis_type}_expander']): + for analysis in student_data[analysis_type][:5]: # Mostrar los últimos 5 + st.subheader(f"{t['analysis_from']} {analysis['timestamp']}") + if analysis_type == 'morphosyntax_analyses': + if 'arc_diagrams' in analysis: + st.write(analysis['arc_diagrams'][0], unsafe_allow_html=True) + elif analysis_type == 'semantic_analyses': + if 'key_concepts' in analysis: + st.write(t['key_concepts']) + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis['key_concepts']])) + if 'graph' in analysis: + st.image(base64.b64decode(analysis['graph'])) + elif analysis_type == 'discourse_analyses': + for i in [1, 2]: + if f'key_concepts{i}' in analysis: + st.write(f"{t['key_concepts']} {t['document']} {i}") + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis[f'key_concepts{i}']])) + if 'combined_graph' in analysis: + st.image(base64.b64decode(analysis['combined_graph'])) + + # Mostrar el historial de chat + with st.expander(t['chat_history_expander']): + for chat in student_data['chat_history'][:5]: # Mostrar las últimas 5 conversaciones + st.subheader(f"{t['chat_from']} {chat['timestamp']}") + for message in chat['messages']: + st.write(f"{message['role'].capitalize()}: {message['content']}") + st.write("---") + + + +def display_student_progress(username, lang_code, t, student_data): + st.subheader(t['student_activities']) + + if not student_data or all(len(student_data[key]) == 0 for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']): + st.warning(t['no_data_warning']) + return + + # Resumen de actividades + total_analyses = sum(len(student_data[key]) for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']) + st.write(f"{t['total_analyses']}: {total_analyses}") + + # Gráfico de tipos de análisis + analysis_counts = { + t['morphological_analysis']: len(student_data['morphosyntax_analyses']), + t['semantic_analyses']: len(student_data['semantic_analyses']), + t['discourse_analyses']: len(student_data['discourse_analyses']) + } + fig, ax = plt.subplots() + ax.bar(analysis_counts.keys(), analysis_counts.values()) + ax.set_title(t['analysis_types_chart']) + st.pyplot(fig) + + # Mostrar los últimos análisis + for analysis_type in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']: 
+ with st.expander(t[f'{analysis_type}_expander']): + for analysis in student_data[analysis_type][:5]: # Mostrar los últimos 5 + st.subheader(f"{t['analysis_from']} {analysis['timestamp']}") + if analysis_type == 'morphosyntax_analyses': + if 'arc_diagrams' in analysis: + st.write(analysis['arc_diagrams'][0], unsafe_allow_html=True) + elif analysis_type == 'semantic_analyses': + if 'key_concepts' in analysis: + st.write(t['key_concepts']) + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis['key_concepts']])) + if 'graph' in analysis: + st.image(base64.b64decode(analysis['graph'])) + elif analysis_type == 'discourse_analyses': + for i in [1, 2]: + if f'key_concepts{i}' in analysis: + st.write(f"{t['key_concepts']} {t['document']} {i}") + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis[f'key_concepts{i}']])) + if 'combined_graph' in analysis: + st.image(base64.b64decode(analysis['combined_graph'])) + + # Mostrar el historial de chat + with st.expander(t['chat_history_expander']): + for chat in student_data['chat_history'][:5]: # Mostrar las últimas 5 conversaciones + st.subheader(f"{t['chat_from']} {chat['timestamp']}") + for message in chat['messages']: + st.write(f"{message['role'].capitalize()}: {message['content']}") + st.write("---") + + + + +def display_student_progress(username, lang_code, t, student_data): + st.subheader(t['student_activities']) + + if not student_data or all(len(student_data[key]) == 0 for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']): + st.warning(t['no_data_warning']) + return + + # Resumen de actividades + total_analyses = sum(len(student_data[key]) for key in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']) + st.write(f"{t['total_analyses']}: {total_analyses}") + + # Gráfico de tipos de análisis + analysis_counts = { + t['morphological_analysis']: len(student_data['morphosyntax_analyses']), + t['semantic_analyses']: len(student_data['semantic_analyses']), + t['discourse_analyses']: len(student_data['discourse_analyses']) + } + fig, ax = plt.subplots() + ax.bar(analysis_counts.keys(), analysis_counts.values()) + ax.set_title(t['analysis_types_chart']) + st.pyplot(fig) + + # Mostrar los últimos análisis + for analysis_type in ['morphosyntax_analyses', 'semantic_analyses', 'discourse_analyses']: + with st.expander(t[f'{analysis_type}_expander']): + for analysis in student_data[analysis_type][:5]: # Mostrar los últimos 5 + st.subheader(f"{t['analysis_from']} {analysis['timestamp']}") + if analysis_type == 'morphosyntax_analyses': + if 'arc_diagrams' in analysis: + st.write(analysis['arc_diagrams'][0], unsafe_allow_html=True) + elif analysis_type == 'semantic_analyses': + if 'key_concepts' in analysis: + st.write(t['key_concepts']) + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis['key_concepts']])) + if 'graph' in analysis: + st.image(base64.b64decode(analysis['graph'])) + elif analysis_type == 'discourse_analyses': + for i in [1, 2]: + if f'key_concepts{i}' in analysis: + st.write(f"{t['key_concepts']} {t['document']} {i}") + st.write(", ".join([f"{concept} ({freq:.2f})" for concept, freq in analysis[f'key_concepts{i}']])) + if 'combined_graph' in analysis: + st.image(base64.b64decode(analysis['combined_graph'])) + + # Mostrar el historial de chat + with st.expander(t['chat_history_expander']): + for chat in student_data['chat_history'][:5]: # Mostrar las últimas 5 conversaciones + st.subheader(f"{t['chat_from']} 
{chat['timestamp']}") + for message in chat['messages']: + st.write(f"{message['role'].capitalize()}: {message['content']}") + st.write("---") + + + + +def display_student_progress(username, lang_code, t): + st.subheader(t['student_activities']) + st.write(f"{t['activities_message']} {username}") + + # Aquí puedes agregar más contenido estático o placeholder + st.info(t['activities_placeholder']) + + # Si necesitas mostrar algún dato, puedes usar datos de ejemplo o placeholders + col1, col2, col3 = st.columns(3) + col1.metric(t['morpho_analyses'], "5") # Ejemplo de dato + col2.metric(t['semantic_analyses'], "3") # Ejemplo de dato + col3.metric(t['discourse_analyses'], "2") # Ejemplo de dato + + + +def display_student_progress(username, lang_code, t): + st.title(f"Actividades de {username}") + + # Obtener todos los datos del estudiante + student_data = get_student_data(username) + + if not student_data or len(student_data.get('entries', [])) == 0: + st.warning("No se encontraron datos de análisis para este estudiante.") + st.info("Intenta realizar algunos análisis de texto primero.") + return + + # Resumen de actividades + with st.expander("Resumen de Actividades", expanded=True): + total_entries = len(student_data['entries']) + st.write(f"Total de análisis realizados: {total_entries}") + + # Gráfico de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + fig, ax = plt.subplots() + analysis_counts.plot(kind='bar', ax=ax) + ax.set_title("Tipos de análisis realizados") + ax.set_xlabel("Tipo de análisis") + ax.set_ylabel("Cantidad") + st.pyplot(fig) + + # Histórico de Análisis Morfosintácticos + with st.expander("Histórico de Análisis Morfosintácticos"): + morpho_analyses = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'morphosyntax'] + for analysis in morpho_analyses[:5]: # Mostrar los últimos 5 + st.subheader(f"Análisis del {analysis['timestamp']}") + if 'arc_diagrams' in analysis: + st.write(analysis['arc_diagrams'][0], unsafe_allow_html=True) + + # Histórico de Análisis Semánticos + with st.expander("Histórico de Análisis Semánticos"): + semantic_analyses = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'semantic'] + for analysis in semantic_analyses[:5]: # Mostrar los últimos 5 + st.subheader(f"Análisis del {analysis['timestamp']}") + if 'key_concepts' in analysis: + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in analysis['key_concepts']]) + st.markdown(f"
{concepts_str}
", unsafe_allow_html=True) + if 'graph' in analysis: + try: + img_bytes = base64.b64decode(analysis['graph']) + st.image(img_bytes, caption="Gráfico de relaciones conceptuales") + except Exception as e: + st.error(f"No se pudo mostrar el gráfico: {str(e)}") + + # Histórico de Análisis Discursivos + with st.expander("Histórico de Análisis Discursivos"): + discourse_analyses = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for analysis in discourse_analyses[:5]: # Mostrar los últimos 5 + st.subheader(f"Análisis del {analysis['timestamp']}") + for i in [1, 2]: + if f'key_concepts{i}' in analysis: + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in analysis[f'key_concepts{i}']]) + st.write(f"Conceptos clave del documento {i}:") + st.markdown(f"
{concepts_str}
", unsafe_allow_html=True) + if 'combined_graph' in analysis: + try: + img_bytes = base64.b64decode(analysis['combined_graph']) + st.image(img_bytes) + except Exception as e: + st.error(f"No se pudo mostrar el gráfico combinado: {str(e)}") + + # Histórico de Conversaciones con el ChatBot + with st.expander("Histórico de Conversaciones con el ChatBot"): + if 'chat_history' in student_data: + for i, chat in enumerate(student_data['chat_history'][:5]): # Mostrar las últimas 5 conversaciones + st.subheader(f"Conversación {i+1} - {chat['timestamp']}") + for message in chat['messages']: + st.write(f"{message['role'].capitalize()}: {message['content']}") + st.write("---") + else: + st.write("No se encontraron conversaciones con el ChatBot.") + + # Opción para mostrar datos de depuración + if st.checkbox("Mostrar datos de depuración"): + st.write("Datos del estudiante (para depuración):") + st.json(student_data) + +''' \ No newline at end of file diff --git a/modules/text_analysis/__init__.py b/modules/text_analysis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/__pycache__/__init__.cpython-311.pyc b/modules/text_analysis/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7768ec60cb4a06cdaec79228f7036c906df7b0 Binary files /dev/null and b/modules/text_analysis/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/text_analysis/__pycache__/discourse_analysis.cpython-311.pyc b/modules/text_analysis/__pycache__/discourse_analysis.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f526ab04a41ca07b198c4e4aaf1337b595c5f33 Binary files /dev/null and b/modules/text_analysis/__pycache__/discourse_analysis.cpython-311.pyc differ diff --git a/modules/text_analysis/__pycache__/morpho_analysis.cpython-311.pyc b/modules/text_analysis/__pycache__/morpho_analysis.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d74855a63b98e328d11eb2b08352b26f31a23655 Binary files /dev/null and b/modules/text_analysis/__pycache__/morpho_analysis.cpython-311.pyc differ diff --git a/modules/text_analysis/__pycache__/semantic_analysis.cpython-311.pyc b/modules/text_analysis/__pycache__/semantic_analysis.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53c63e092b99349668b992be27100905c36bb707 Binary files /dev/null and b/modules/text_analysis/__pycache__/semantic_analysis.cpython-311.pyc differ diff --git a/modules/text_analysis/coherence_analysis.py b/modules/text_analysis/coherence_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f5a12faa99758192ecc4ed3fc22c9249232e86 --- /dev/null +++ b/modules/text_analysis/coherence_analysis.py @@ -0,0 +1 @@ + diff --git a/modules/text_analysis/complex_structures.py b/modules/text_analysis/complex_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/discourse_analysis.py b/modules/text_analysis/discourse_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e19717a070b495c8e26314aeb2e4501b5c8fdb --- /dev/null +++ b/modules/text_analysis/discourse_analysis.py @@ -0,0 +1,72 @@ +import streamlit as st +import spacy +import networkx as nx +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np +from .semantic_analysis import ( + 
    create_concept_graph,
    visualize_concept_graph,
    identify_key_concepts,
    POS_COLORS,
    POS_TRANSLATIONS,
    ENTITY_LABELS
)
from translations import get_translations  # used by display_discourse_analysis_results below

def compare_semantic_analysis(text1, text2, nlp, lang):
    doc1 = nlp(text1)
    doc2 = nlp(text2)

    # Identify key concepts for both documents
    key_concepts1 = identify_key_concepts(doc1)
    key_concepts2 = identify_key_concepts(doc2)

    # Build concept graphs for both documents
    G1 = create_concept_graph(doc1, key_concepts1)
    G2 = create_concept_graph(doc2, key_concepts2)

    # Render the concept graphs
    fig1 = visualize_concept_graph(G1, lang)
    fig2 = visualize_concept_graph(G2, lang)

    # Remove the overlapping titles
    fig1.suptitle("")
    fig2.suptitle("")

    return fig1, fig2, key_concepts1, key_concepts2

def create_concept_table(key_concepts):
    df = pd.DataFrame(key_concepts, columns=['Concepto', 'Frecuencia'])
    df['Frecuencia'] = df['Frecuencia'].round(2)
    return df

def perform_discourse_analysis(text1, text2, nlp, lang):
    graph1, graph2, key_concepts1, key_concepts2 = compare_semantic_analysis(text1, text2, nlp, lang)

    # Build the key-concept tables and return them alongside the graphs, since
    # display_discourse_analysis_results reads 'table1' and 'table2' from this dict
    table1 = create_concept_table(key_concepts1)
    table2 = create_concept_table(key_concepts2)

    return {
        'graph1': graph1,
        'graph2': graph2,
        'table1': table1,
        'table2': table2,
        'key_concepts1': key_concepts1,
        'key_concepts2': key_concepts2
    }

def display_discourse_analysis_results(analysis_result, lang_code):
    t = get_translations(lang_code)

    col1, col2 = st.columns(2)

    with col1:
        with st.expander(t['doc1_title'], expanded=True):
            st.pyplot(analysis_result['graph1'])
            st.subheader(t['key_concepts'])
            st.table(analysis_result['table1'])

    with col2:
        with st.expander(t['doc2_title'], expanded=True):
            st.pyplot(analysis_result['graph2'])
            st.subheader(t['key_concepts'])
            st.table(analysis_result['table2'])
\ No newline at end of file
diff --git a/modules/text_analysis/entity_analysis.py b/modules/text_analysis/entity_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/text_analysis/idiom_detection.py b/modules/text_analysis/idiom_detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/text_analysis/intertextual_analysis.py b/modules/text_analysis/intertextual_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/text_analysis/morpho_analysis-Back1910-25-9-24.py b/modules/text_analysis/morpho_analysis-Back1910-25-9-24.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a0823063d965ac2ca3715fc1484046dd8be39a6
--- /dev/null
+++ b/modules/text_analysis/morpho_analysis-Back1910-25-9-24.py
@@ -0,0 +1,253 @@
import spacy
from spacy import displacy
from streamlit.components.v1 import html
import base64

from collections import Counter
import re
from ..utils.widget_utils import generate_unique_key

import logging
logger = logging.getLogger(__name__)


# Define colors for grammatical categories
POS_COLORS = {
    'ADJ': '#FFA07A',    # Light Salmon
    'ADP': '#98FB98',    # Pale Green
    'ADV': '#87CEFA',    # Light Sky Blue
    'AUX': '#DDA0DD',    # Plum
    'CCONJ': '#F0E68C',  # Khaki
    'DET': '#FFB6C1',    # Light Pink
    'INTJ': '#FF6347',   # Tomato
    'NOUN': '#90EE90',   # Light Green
    'NUM': '#FAFAD2',    # Light Goldenrod Yellow
    'PART': '#D3D3D3',   # Light Gray
    'PRON': '#FFA500',   # Orange
'PROPN': '#20B2AA', # Light Sea Green + 'SCONJ': '#DEB887', # Burlywood + 'SYM': '#7B68EE', # Medium Slate Blue + 'VERB': '#FF69B4', # Hot Pink + 'X': '#A9A9A9', # Dark Gray +} + +POS_TRANSLATIONS = { + 'es': { + 'ADJ': 'Adjetivo', + 'ADP': 'Preposición', + 'ADV': 'Adverbio', + 'AUX': 'Auxiliar', + 'CCONJ': 'Conjunción Coordinante', + 'DET': 'Determinante', + 'INTJ': 'Interjección', + 'NOUN': 'Sustantivo', + 'NUM': 'Número', + 'PART': 'Partícula', + 'PRON': 'Pronombre', + 'PROPN': 'Nombre Propio', + 'SCONJ': 'Conjunción Subordinante', + 'SYM': 'Símbolo', + 'VERB': 'Verbo', + 'X': 'Otro', + }, + 'en': { + 'ADJ': 'Adjective', + 'ADP': 'Preposition', + 'ADV': 'Adverb', + 'AUX': 'Auxiliary', + 'CCONJ': 'Coordinating Conjunction', + 'DET': 'Determiner', + 'INTJ': 'Interjection', + 'NOUN': 'Noun', + 'NUM': 'Number', + 'PART': 'Particle', + 'PRON': 'Pronoun', + 'PROPN': 'Proper Noun', + 'SCONJ': 'Subordinating Conjunction', + 'SYM': 'Symbol', + 'VERB': 'Verb', + 'X': 'Other', + }, + 'fr': { + 'ADJ': 'Adjectif', + 'ADP': 'Préposition', + 'ADV': 'Adverbe', + 'AUX': 'Auxiliaire', + 'CCONJ': 'Conjonction de Coordination', + 'DET': 'Déterminant', + 'INTJ': 'Interjection', + 'NOUN': 'Nom', + 'NUM': 'Nombre', + 'PART': 'Particule', + 'PRON': 'Pronom', + 'PROPN': 'Nom Propre', + 'SCONJ': 'Conjonction de Subordination', + 'SYM': 'Symbole', + 'VERB': 'Verbe', + 'X': 'Autre', + } +} + +def generate_arc_diagram(doc): + arc_diagrams = [] + for sent in doc.sents: + words = [token.text for token in sent] + # Calculamos el ancho del SVG basado en la longitud de la oración + svg_width = max(100, len(words) * 120) + # Altura fija para cada oración + svg_height = 300 # Controla la altura del SVG + + # Renderizamos el diagrama de dependencias + html = displacy.render(sent, style="dep", options={ + "add_lemma":False, # Introduced in version 2.2.4, this argument prints the lemma’s in a separate row below the token texts. + "arrow_spacing": 12, #This argument is used for adjusting the spacing between arrows in px to avoid overlaps. + "arrow_width": 2, #This argument is used for adjusting the width of arrow head in px. + "arrow_stroke": 2, #This argument is used for adjusting the width of arrow path in px. + "collapse_punct": True, #It attaches punctuation to the tokens. + "collapse_phrases": False, # This argument merges the noun phrases into one token. + "compact":False, # If you will take this argument as true, you will get the “Compact mode” with square arrows that takes up less space. + "color": "#ffffff", + "bg": "#0d6efd", + "compact": False, #Put the value of this argument True, if you want to use fine-grained part-of-speech tags (Token.tag_), instead of coarse-grained tags (Token.pos_). + "distance": 100, # Aumentamos la distancia entre palabras + "fine_grained": False, #Put the value of this argument True, if you want to use fine-grained part-of-speech tags (Token.tag_), instead of coarse-grained tags (Token.pos_). + "offset_x": 0, # This argument is used for spacing on left side of the SVG in px. + "word_spacing": 25, #This argument is used for adjusting the vertical spacing between words and arcs in px. 
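            # Note: "compact" appears twice in this options dict; Python dict literals keep
            # the last occurrence, so the earlier "compact": False entry is silently overridden.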
        })

        # Adjust the size of the SVG and its viewBox
        html = re.sub(r'width="(\d+)"', f'width="{svg_width}"', html)
        html = re.sub(r'height="(\d+)"', f'height="{svg_height}"', html)
        html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)

        arc_diagrams.append(html)
    return arc_diagrams


def get_repeated_words_colors(doc):
    # Collect the words that occur more than once and map them to their POS color
    word_freq = Counter(token.text.lower() for token in doc if not token.is_punct)
    repeated_words = {word: count for word, count in word_freq.items() if count > 1}

    word_colors = {}
    for token in doc:
        if token.text.lower() in repeated_words:
            word_colors[token.text.lower()] = POS_COLORS.get(token.pos_, '#FFFFFF')

    return word_colors

def highlight_repeated_words(doc):
    word_colors = get_repeated_words_colors(doc)
    highlighted_text = []
    for token in doc:
        if token.text.lower() in word_colors:
            color = word_colors[token.text.lower()]
            highlighted_text.append(f'<span style="background-color: {color};">{token.text}</span>')
        else:
            highlighted_text.append(token.text)
    return ' '.join(highlighted_text)


# Export every required function and variable
__all__ = [
    'get_repeated_words_colors',
    'highlight_repeated_words',
    'generate_arc_diagram',
    'perform_pos_analysis',
    'perform_morphological_analysis',
    'analyze_sentence_structure',
    'perform_advanced_morphosyntactic_analysis',
    'POS_COLORS',
    'POS_TRANSLATIONS'
]
\ No newline at end of file
diff --git a/modules/text_analysis/morpho_analysis.py b/modules/text_analysis/morpho_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..42007f145af9bed723e1b98993a3a6bda8cdf51b
--- /dev/null
+++ b/modules/text_analysis/morpho_analysis.py
@@ -0,0 +1,158 @@
import spacy
from spacy import displacy
from streamlit.components.v1 import html
import base64

from collections import Counter
import re
from ..utils.widget_utils import generate_unique_key

import logging
logger = logging.getLogger(__name__)


# Define colors for grammatical categories
POS_COLORS = {
    'ADJ': '#FFA07A', 'ADP': '#98FB98', 'ADV': '#87CEFA', 'AUX': '#DDA0DD',
    'CCONJ': '#F0E68C', 'DET': '#FFB6C1', 'INTJ': '#FF6347', 'NOUN': '#90EE90',
    'NUM': '#FAFAD2', 'PART': '#D3D3D3', 'PRON': '#FFA500', 'PROPN': '#20B2AA',
    'SCONJ': '#DEB887', 'SYM': '#7B68EE', 'VERB': '#FF69B4', 'X': '#A9A9A9',
}

POS_TRANSLATIONS = {
    'es': {
        'ADJ': 'Adjetivo', 'ADP': 'Preposición', 'ADV': 'Adverbio', 'AUX': 'Auxiliar',
        'CCONJ': 'Conjunción Coordinante', 'DET': 'Determinante', 'INTJ': 'Interjección',
        'NOUN': 'Sustantivo', 'NUM': 'Número', 'PART': 'Partícula', 'PRON': 'Pronombre',
        'PROPN': 'Nombre Propio', 'SCONJ': 'Conjunción Subordinante', 'SYM': 'Símbolo',
        'VERB': 'Verbo', 'X': 'Otro',
    },
    'en': {
        'ADJ': 'Adjective', 'ADP': 'Preposition', 'ADV': 'Adverb', 'AUX': 'Auxiliary',
        'CCONJ': 'Coordinating Conjunction', 'DET': 'Determiner', 'INTJ': 'Interjection',
        'NOUN': 'Noun', 'NUM': 'Number', 'PART': 'Particle', 'PRON': 'Pronoun',
        'PROPN': 'Proper Noun', 'SCONJ': 'Subordinating Conjunction', 'SYM': 'Symbol',
        'VERB': 'Verb', 'X': 'Other',
    },
    'fr': {
        'ADJ': 'Adjectif', 'ADP': 'Préposition', 'ADV': 'Adverbe', 'AUX': 'Auxiliaire',
        'CCONJ': 'Conjonction de Coordination', 'DET': 'Déterminant', 'INTJ': 'Interjection',
        'NOUN': 'Nom', 'NUM': 'Nombre', 'PART': 'Particule', 'PRON': 'Pronom',
        'PROPN': 'Nom Propre', 'SCONJ': 'Conjonction de Subordination', 'SYM': 'Symbole',
        'VERB': 'Verbe', 'X': 'Autre',
    }
}

def generate_arc_diagram(doc):
    arc_diagrams = []
    for sent in doc.sents:
        words = [token.text for token in sent]
        # Scale the SVG width with the sentence length
        svg_width = max(600, len(words) * 120)
        # Fixed height per sentence; this controls the height of the SVG
        svg_height = 350

        # Render the dependency diagram
        html = displacy.render(sent, style="dep", options={
            "add_lemma": False,         # introduced in spaCy 2.2.4; prints the lemmas in a separate row below the token texts
            "arrow_spacing": 12,        # spacing between arrows in px, to avoid overlaps
            "arrow_width": 2,           # width of the arrow head in px
            "arrow_stroke": 2,          # width of the arrow path in px
            "collapse_punct": True,     # attach punctuation to the tokens
            "collapse_phrases": False,  # do not merge noun phrases into one token
            "compact": False,           # True gives the "compact mode" with square arrows that takes up less space
            "color": "#ffffff",
            "bg": "#0d6efd",
            "distance": 100,            # increase the distance between words
            "fine_grained": False,      # True uses fine-grained POS tags (Token.tag_) instead of coarse-grained ones (Token.pos_)
            "offset_x": 55,             # left-side spacing of the SVG in px
            "word_spacing": 25,         # vertical spacing between words and arcs in px
        })

        # Adjust the size of the SVG and its viewBox
        html = re.sub(r'width="(\d+)"', f'width="{svg_width}"', html)
        html = re.sub(r'height="(\d+)"', f'height="{svg_height}"', html)
        html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)

        arc_diagrams.append(html)
    return arc_diagrams


def identify_key_concepts(doc):
    logger.info("Identifying key concepts")
    word_freq = Counter([token.lemma_.lower() for token in doc if token.pos_ in ['NOUN', 'VERB'] and not token.is_stop])
    key_concepts = word_freq.most_common(10)
    return [(concept, float(freq)) for concept, freq in key_concepts]


def create_concept_graph(doc, key_concepts):
    G = nx.Graph()
    for concept, freq in key_concepts:
        G.add_node(concept, weight=freq)
    for sent in doc.sents:
        sent_concepts = [token.lemma_.lower() for token in sent if token.lemma_.lower() in dict(key_concepts)]
        for i, concept1 in enumerate(sent_concepts):
            for concept2 in sent_concepts[i+1:]:
                if G.has_edge(concept1, concept2):
                    G[concept1][concept2]['weight'] += 1
                else:
                    G.add_edge(concept1, concept2, weight=1)
    return G

def visualize_concept_graph(G, lang_code):
    fig, ax = plt.subplots(figsize=(12, 8))
    pos = nx.spring_layout(G, k=0.5, iterations=50)
    node_sizes = [G.nodes[node]['weight'] * 100 for node in G.nodes()]
    nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='lightblue', alpha=0.8, ax=ax)
    nx.draw_networkx_labels(G, pos, font_size=10, font_weight="bold", ax=ax)
    edge_weights = [G[u][v]['weight'] for u, v in G.edges()]
    nx.draw_networkx_edges(G, pos, width=edge_weights, alpha=0.5, ax=ax)
    title = {
        'es': "Relaciones entre Conceptos Clave",
        'en': "Key Concept Relations",
        'fr': "Relations entre Concepts Clés"
    }
    ax.set_title(title[lang_code], fontsize=16)
    ax.axis('off')
    plt.tight_layout()
    return fig

def create_entity_graph(entities):
    G = nx.Graph()
    for entity_type, entity_list in entities.items():
        for entity in entity_list:
            G.add_node(entity, type=entity_type)
        for i, entity1 in enumerate(entity_list):
            for entity2 in entity_list[i+1:]:
                G.add_edge(entity1, entity2)
    return G

def visualize_entity_graph(G, lang_code):
    fig, ax = plt.subplots(figsize=(12, 8))
    pos = nx.spring_layout(G)
    for entity_type, color in ENTITY_LABELS[lang_code].items():
        node_list = [node for node, data in G.nodes(data=True) if data['type'] == entity_type]
        nx.draw_networkx_nodes(G, pos, nodelist=node_list, node_color=color, node_size=500, alpha=0.8, ax=ax)
    nx.draw_networkx_edges(G, pos, width=1, alpha=0.5, ax=ax)
    nx.draw_networkx_labels(G, pos, font_size=8, font_weight="bold", ax=ax)
    ax.set_title(f"Relaciones entre Entidades ({lang_code})", fontsize=16)
    ax.axis('off')
    plt.tight_layout()
    return fig


#################################################################################
def create_topic_graph(topics, doc):
    G = nx.Graph()
    for topic in topics:
        G.add_node(topic, weight=doc.text.count(topic))
    for i, topic1 in enumerate(topics):
        for topic2 in topics[i+1:]:
            weight = sum(1 for sent in doc.sents if topic1 in sent.text and topic2 in sent.text)
            if weight > 0:
                G.add_edge(topic1, topic2, weight=weight)
    return G

def visualize_topic_graph(G, lang_code):
    fig, ax = plt.subplots(figsize=(12, 8))
    pos = nx.spring_layout(G)
    node_sizes = [G.nodes[node]['weight'] * 100 for node in G.nodes()]
    nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='lightgreen', alpha=0.8, ax=ax)
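    # Sizing note (hedged): node_size grows linearly with the raw topic weight, so a
    # single dominant topic can dwarf every other node; a log scale is one alternative:
    #     import math
    #     node_sizes = [300 + 200 * math.log1p(G.nodes[n]['weight']) for n in G.nodes()]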
nx.draw_networkx_labels(G, pos, font_size=10, font_weight="bold", ax=ax) + edge_weights = [G[u][v]['weight'] for u, v in G.edges()] + nx.draw_networkx_edges(G, pos, width=edge_weights, alpha=0.5, ax=ax) + ax.set_title(f"Relaciones entre Temas ({lang_code})", fontsize=16) + ax.axis('off') + plt.tight_layout() + return fig + +########################################################################################### +def generate_summary(doc, lang_code): + sentences = list(doc.sents) + summary = sentences[:3] # Toma las primeras 3 oraciones como resumen + return " ".join([sent.text for sent in summary]) + +def extract_entities(doc, lang_code): + entities = defaultdict(list) + for ent in doc.ents: + if ent.label_ in ENTITY_LABELS[lang_code]: + entities[ent.label_].append(ent.text) + return dict(entities) + +def analyze_sentiment(doc, lang_code): + positive_words = sum(1 for token in doc if token.sentiment > 0) + negative_words = sum(1 for token in doc if token.sentiment < 0) + total_words = len(doc) + if positive_words > negative_words: + return "Positivo" + elif negative_words > positive_words: + return "Negativo" + else: + return "Neutral" + +def extract_topics(doc, lang_code): + vectorizer = TfidfVectorizer(stop_words='english', max_features=5) + tfidf_matrix = vectorizer.fit_transform([doc.text]) + feature_names = vectorizer.get_feature_names_out() + return list(feature_names) + +# Asegúrate de que todas las funciones necesarias estén exportadas +__all__ = [ + 'perform_semantic_analysis', + 'identify_key_concepts', + 'create_concept_graph', + 'visualize_concept_graph', + 'create_entity_graph', + 'visualize_entity_graph', + 'generate_summary', + 'extract_entities', + 'analyze_sentiment', + 'create_topic_graph', + 'visualize_topic_graph', + 'extract_topics', + 'ENTITY_LABELS', + 'POS_COLORS', + 'POS_TRANSLATIONS' +] \ No newline at end of file diff --git a/modules/text_analysis/semantic_analysis_v0.py b/modules/text_analysis/semantic_analysis_v0.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b4d7c9379a0c76f686b8be2e529cf4311cceb5 --- /dev/null +++ b/modules/text_analysis/semantic_analysis_v0.py @@ -0,0 +1,264 @@ +#semantic_analysis.py +import streamlit as st +import spacy +import networkx as nx +import matplotlib.pyplot as plt +from collections import Counter +from collections import defaultdict +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity + +# Define colors for grammatical categories +POS_COLORS = { + 'ADJ': '#FFA07A', # Light Salmon + 'ADP': '#98FB98', # Pale Green + 'ADV': '#87CEFA', # Light Sky Blue + 'AUX': '#DDA0DD', # Plum + 'CCONJ': '#F0E68C', # Khaki + 'DET': '#FFB6C1', # Light Pink + 'INTJ': '#FF6347', # Tomato + 'NOUN': '#90EE90', # Light Green + 'NUM': '#FAFAD2', # Light Goldenrod Yellow + 'PART': '#D3D3D3', # Light Gray + 'PRON': '#FFA500', # Orange + 'PROPN': '#20B2AA', # Light Sea Green + 'SCONJ': '#DEB887', # Burlywood + 'SYM': '#7B68EE', # Medium Slate Blue + 'VERB': '#FF69B4', # Hot Pink + 'X': '#A9A9A9', # Dark Gray +} + +POS_TRANSLATIONS = { + 'es': { + 'ADJ': 'Adjetivo', + 'ADP': 'Preposición', + 'ADV': 'Adverbio', + 'AUX': 'Auxiliar', + 'CCONJ': 'Conjunción Coordinante', + 'DET': 'Determinante', + 'INTJ': 'Interjección', + 'NOUN': 'Sustantivo', + 'NUM': 'Número', + 'PART': 'Partícula', + 'PRON': 'Pronombre', + 'PROPN': 'Nombre Propio', + 'SCONJ': 'Conjunción Subordinante', + 'SYM': 'Símbolo', + 'VERB': 'Verbo', + 'X': 'Otro', + }, + 'en': { + 'ADJ': 'Adjective', + 'ADP': 
'Preposition', + 'ADV': 'Adverb', + 'AUX': 'Auxiliary', + 'CCONJ': 'Coordinating Conjunction', + 'DET': 'Determiner', + 'INTJ': 'Interjection', + 'NOUN': 'Noun', + 'NUM': 'Number', + 'PART': 'Particle', + 'PRON': 'Pronoun', + 'PROPN': 'Proper Noun', + 'SCONJ': 'Subordinating Conjunction', + 'SYM': 'Symbol', + 'VERB': 'Verb', + 'X': 'Other', + }, + 'fr': { + 'ADJ': 'Adjectif', + 'ADP': 'Préposition', + 'ADV': 'Adverbe', + 'AUX': 'Auxiliaire', + 'CCONJ': 'Conjonction de Coordination', + 'DET': 'Déterminant', + 'INTJ': 'Interjection', + 'NOUN': 'Nom', + 'NUM': 'Nombre', + 'PART': 'Particule', + 'PRON': 'Pronom', + 'PROPN': 'Nom Propre', + 'SCONJ': 'Conjonction de Subordination', + 'SYM': 'Symbole', + 'VERB': 'Verbe', + 'X': 'Autre', + } +} +######################################################################################################################################## + +# Definimos las etiquetas y colores para cada idioma +ENTITY_LABELS = { + 'es': { + "Personas": "lightblue", + "Conceptos": "lightgreen", + "Lugares": "lightcoral", + "Fechas": "lightyellow" + }, + 'en': { + "People": "lightblue", + "Concepts": "lightgreen", + "Places": "lightcoral", + "Dates": "lightyellow" + }, + 'fr': { + "Personnes": "lightblue", + "Concepts": "lightgreen", + "Lieux": "lightcoral", + "Dates": "lightyellow" + } +} + +######################################################################################################### +def count_pos(doc): + return Counter(token.pos_ for token in doc if token.pos_ != 'PUNCT') + +##################################################################################################################### + +def create_semantic_graph(doc, lang): + G = nx.Graph() + word_freq = defaultdict(int) + lemma_to_word = {} + lemma_to_pos = {} + + # Count frequencies of lemmas and map lemmas to their most common word form and POS + for token in doc: + if token.pos_ in ['NOUN', 'VERB']: + lemma = token.lemma_.lower() + word_freq[lemma] += 1 + if lemma not in lemma_to_word or token.text.lower() == lemma: + lemma_to_word[lemma] = token.text + lemma_to_pos[lemma] = token.pos_ + + # Get top 20 most frequent lemmas + top_lemmas = [lemma for lemma, _ in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:20]] + + # Add nodes + for lemma in top_lemmas: + word = lemma_to_word[lemma] + G.add_node(word, pos=lemma_to_pos[lemma]) + + # Add edges + for token in doc: + if token.lemma_.lower() in top_lemmas: + if token.head.lemma_.lower() in top_lemmas: + source = lemma_to_word[token.lemma_.lower()] + target = lemma_to_word[token.head.lemma_.lower()] + if source != target: # Avoid self-loops + G.add_edge(source, target, label=token.dep_) + + return G, word_freq + +############################################################################################################################################ + +def visualize_semantic_relations(doc, lang): + G = nx.Graph() + word_freq = defaultdict(int) + lemma_to_word = {} + lemma_to_pos = {} + + # Count frequencies of lemmas and map lemmas to their most common word form and POS + for token in doc: + if token.pos_ in ['NOUN', 'VERB']: + lemma = token.lemma_.lower() + word_freq[lemma] += 1 + if lemma not in lemma_to_word or token.text.lower() == lemma: + lemma_to_word[lemma] = token.text + lemma_to_pos[lemma] = token.pos_ + + # Get top 20 most frequent lemmas + top_lemmas = [lemma for lemma, _ in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:20]] + + # Add nodes + for lemma in top_lemmas: + word = lemma_to_word[lemma] + 
G.add_node(word, pos=lemma_to_pos[lemma]) + + # Add edges + for token in doc: + if token.lemma_.lower() in top_lemmas: + if token.head.lemma_.lower() in top_lemmas: + source = lemma_to_word[token.lemma_.lower()] + target = lemma_to_word[token.head.lemma_.lower()] + if source != target: # Avoid self-loops + G.add_edge(source, target, label=token.dep_) + + fig, ax = plt.subplots(figsize=(36, 27)) + pos = nx.spring_layout(G, k=0.7, iterations=50) + + node_colors = [POS_COLORS.get(G.nodes[node]['pos'], '#CCCCCC') for node in G.nodes()] + + nx.draw(G, pos, node_color=node_colors, with_labels=True, + node_size=10000, + font_size=16, + font_weight='bold', + arrows=True, + arrowsize=30, + width=3, + edge_color='gray', + ax=ax) + + edge_labels = nx.get_edge_attributes(G, 'label') + nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=14, ax=ax) + + title = { + 'es': "Relaciones Semánticas Relevantes", + 'en': "Relevant Semantic Relations", + 'fr': "Relations Sémantiques Pertinentes" + } + ax.set_title(title[lang], fontsize=24, fontweight='bold') + ax.axis('off') + + legend_elements = [plt.Rectangle((0,0),1,1,fc=POS_COLORS.get(pos, '#CCCCCC'), edgecolor='none', + label=f"{POS_TRANSLATIONS[lang].get(pos, pos)}") + for pos in ['NOUN', 'VERB']] + ax.legend(handles=legend_elements, loc='center left', bbox_to_anchor=(1, 0.5), fontsize=16) + + return fig + +############################################################################################################################################ +def identify_and_contextualize_entities(doc, lang): + entities = [] + for ent in doc.ents: + # Obtener el contexto (3 palabras antes y después de la entidad) + start = max(0, ent.start - 3) + end = min(len(doc), ent.end + 3) + context = doc[start:end].text + + entities.append({ + 'text': ent.text, + 'label': ent.label_, + 'start': ent.start, + 'end': ent.end, + 'context': context + }) + + # Identificar conceptos clave (usando sustantivos y verbos más frecuentes) + word_freq = Counter([token.lemma_.lower() for token in doc if token.pos_ in ['NOUN', 'VERB'] and not token.is_stop]) + key_concepts = word_freq.most_common(10) # Top 10 conceptos clave + + return entities, key_concepts + + +############################################################################################################################################ +def perform_semantic_analysis(text, nlp, lang): + doc = nlp(text) + + # Identificar entidades y conceptos clave + entities, key_concepts = identify_and_contextualize_entities(doc, lang) + + # Visualizar relaciones semánticas + relations_graph = visualize_semantic_relations(doc, lang) + + # Imprimir entidades para depuración + print(f"Entidades encontradas ({lang}):") + for ent in doc.ents: + print(f"{ent.text} - {ent.label_}") + + relations_graph = visualize_semantic_relations(doc, lang) + return { + 'entities': entities, + 'key_concepts': key_concepts, + 'relations_graph': relations_graph + } + +__all__ = ['visualize_semantic_relations', 'create_semantic_graph', 'POS_COLORS', 'POS_TRANSLATIONS', 'identify_and_contextualize_entities'] \ No newline at end of file diff --git a/modules/text_analysis/semantic_analysis_v00.py b/modules/text_analysis/semantic_analysis_v00.py new file mode 100644 index 0000000000000000000000000000000000000000..22a37a74e462656aeb061fc8b6a65723d1a654a5 --- /dev/null +++ b/modules/text_analysis/semantic_analysis_v00.py @@ -0,0 +1,153 @@ +#semantic_analysis.py +import streamlit as st +import spacy +import networkx as nx +import matplotlib.pyplot as plt 
+from collections import Counter, defaultdict +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity + +# Define colors for grammatical categories +POS_COLORS = { + 'ADJ': '#FFA07A', 'ADP': '#98FB98', 'ADV': '#87CEFA', 'AUX': '#DDA0DD', + 'CCONJ': '#F0E68C', 'DET': '#FFB6C1', 'INTJ': '#FF6347', 'NOUN': '#90EE90', + 'NUM': '#FAFAD2', 'PART': '#D3D3D3', 'PRON': '#FFA500', 'PROPN': '#20B2AA', + 'SCONJ': '#DEB887', 'SYM': '#7B68EE', 'VERB': '#FF69B4', 'X': '#A9A9A9', +} + +POS_TRANSLATIONS = { + 'es': { + 'ADJ': 'Adjetivo', 'ADP': 'Preposición', 'ADV': 'Adverbio', 'AUX': 'Auxiliar', + 'CCONJ': 'Conjunción Coordinante', 'DET': 'Determinante', 'INTJ': 'Interjección', + 'NOUN': 'Sustantivo', 'NUM': 'Número', 'PART': 'Partícula', 'PRON': 'Pronombre', + 'PROPN': 'Nombre Propio', 'SCONJ': 'Conjunción Subordinante', 'SYM': 'Símbolo', + 'VERB': 'Verbo', 'X': 'Otro', + }, + 'en': { + 'ADJ': 'Adjective', 'ADP': 'Preposition', 'ADV': 'Adverb', 'AUX': 'Auxiliary', + 'CCONJ': 'Coordinating Conjunction', 'DET': 'Determiner', 'INTJ': 'Interjection', + 'NOUN': 'Noun', 'NUM': 'Number', 'PART': 'Particle', 'PRON': 'Pronoun', + 'PROPN': 'Proper Noun', 'SCONJ': 'Subordinating Conjunction', 'SYM': 'Symbol', + 'VERB': 'Verb', 'X': 'Other', + }, + 'fr': { + 'ADJ': 'Adjectif', 'ADP': 'Préposition', 'ADV': 'Adverbe', 'AUX': 'Auxiliaire', + 'CCONJ': 'Conjonction de Coordination', 'DET': 'Déterminant', 'INTJ': 'Interjection', + 'NOUN': 'Nom', 'NUM': 'Nombre', 'PART': 'Particule', 'PRON': 'Pronom', + 'PROPN': 'Nom Propre', 'SCONJ': 'Conjonction de Subordination', 'SYM': 'Symbole', + 'VERB': 'Verbe', 'X': 'Autre', + } +} + +ENTITY_LABELS = { + 'es': { + "Personas": "lightblue", + "Lugares": "lightcoral", + "Inventos": "lightgreen", + "Fechas": "lightyellow", + "Conceptos": "lightpink" + }, + 'en': { + "People": "lightblue", + "Places": "lightcoral", + "Inventions": "lightgreen", + "Dates": "lightyellow", + "Concepts": "lightpink" + }, + 'fr': { + "Personnes": "lightblue", + "Lieux": "lightcoral", + "Inventions": "lightgreen", + "Dates": "lightyellow", + "Concepts": "lightpink" + } +} + +def identify_and_contextualize_entities(doc, lang): + entities = [] + for ent in doc.ents: + # Obtener el contexto (3 palabras antes y después de la entidad) + start = max(0, ent.start - 3) + end = min(len(doc), ent.end + 3) + context = doc[start:end].text + + # Mapear las etiquetas de spaCy a nuestras categorías + if ent.label_ in ['PERSON', 'ORG']: + category = "Personas" if lang == 'es' else "People" if lang == 'en' else "Personnes" + elif ent.label_ in ['LOC', 'GPE']: + category = "Lugares" if lang == 'es' else "Places" if lang == 'en' else "Lieux" + elif ent.label_ in ['PRODUCT']: + category = "Inventos" if lang == 'es' else "Inventions" if lang == 'en' else "Inventions" + elif ent.label_ in ['DATE', 'TIME']: + category = "Fechas" if lang == 'es' else "Dates" if lang == 'en' else "Dates" + else: + category = "Conceptos" if lang == 'es' else "Concepts" if lang == 'en' else "Concepts" + + entities.append({ + 'text': ent.text, + 'label': category, + 'start': ent.start, + 'end': ent.end, + 'context': context + }) + + # Identificar conceptos clave (usando sustantivos y verbos más frecuentes) + word_freq = Counter([token.lemma_.lower() for token in doc if token.pos_ in ['NOUN', 'VERB'] and not token.is_stop]) + key_concepts = word_freq.most_common(10) # Top 10 conceptos clave + + return entities, key_concepts + +def create_concept_graph(text, concepts): + vectorizer = 
TfidfVectorizer() + tfidf_matrix = vectorizer.fit_transform([text]) + concept_vectors = vectorizer.transform(concepts) + similarity_matrix = cosine_similarity(concept_vectors, concept_vectors) + + G = nx.Graph() + for i, concept in enumerate(concepts): + G.add_node(concept) + for j in range(i+1, len(concepts)): + if similarity_matrix[i][j] > 0.1: + G.add_edge(concept, concepts[j], weight=similarity_matrix[i][j]) + + return G + +def visualize_concept_graph(G, lang): + fig, ax = plt.subplots(figsize=(12, 8)) + pos = nx.spring_layout(G) + + nx.draw_networkx_nodes(G, pos, node_size=3000, node_color='lightblue', ax=ax) + nx.draw_networkx_labels(G, pos, font_size=10, font_weight="bold", ax=ax) + nx.draw_networkx_edges(G, pos, width=1, ax=ax) + + edge_labels = nx.get_edge_attributes(G, 'weight') + nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8, ax=ax) + + title = { + 'es': "Relaciones Conceptuales", + 'en': "Conceptual Relations", + 'fr': "Relations Conceptuelles" + } + ax.set_title(title[lang], fontsize=16) + ax.axis('off') + + return fig + +def perform_semantic_analysis(text, nlp, lang): + doc = nlp(text) + + # Identificar entidades y conceptos clave + entities, key_concepts = identify_and_contextualize_entities(doc, lang) + + # Crear y visualizar grafo de conceptos + concepts = [concept for concept, _ in key_concepts] + concept_graph = create_concept_graph(text, concepts) + relations_graph = visualize_concept_graph(concept_graph, lang) + + return { + 'entities': entities, + 'key_concepts': key_concepts, + 'relations_graph': relations_graph + } + +__all__ = ['perform_semantic_analysis', 'ENTITY_LABELS', 'POS_TRANSLATIONS'] \ No newline at end of file diff --git a/modules/text_analysis/semantic_analysis_v23-9-2024.py b/modules/text_analysis/semantic_analysis_v23-9-2024.py new file mode 100644 index 0000000000000000000000000000000000000000..7e1c435e13ca0c6b33bcb707ad5079c48707c581 --- /dev/null +++ b/modules/text_analysis/semantic_analysis_v23-9-2024.py @@ -0,0 +1,247 @@ +#semantic_analysis.py +import streamlit as st +import spacy +import networkx as nx +import matplotlib.pyplot as plt +import io +import base64 +from collections import Counter, defaultdict +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity +import logging + +logger = logging.getLogger(__name__) + + +# Define colors for grammatical categories +POS_COLORS = { + 'ADJ': '#FFA07A', 'ADP': '#98FB98', 'ADV': '#87CEFA', 'AUX': '#DDA0DD', + 'CCONJ': '#F0E68C', 'DET': '#FFB6C1', 'INTJ': '#FF6347', 'NOUN': '#90EE90', + 'NUM': '#FAFAD2', 'PART': '#D3D3D3', 'PRON': '#FFA500', 'PROPN': '#20B2AA', + 'SCONJ': '#DEB887', 'SYM': '#7B68EE', 'VERB': '#FF69B4', 'X': '#A9A9A9', +} + +POS_TRANSLATIONS = { + 'es': { + 'ADJ': 'Adjetivo', 'ADP': 'Preposición', 'ADV': 'Adverbio', 'AUX': 'Auxiliar', + 'CCONJ': 'Conjunción Coordinante', 'DET': 'Determinante', 'INTJ': 'Interjección', + 'NOUN': 'Sustantivo', 'NUM': 'Número', 'PART': 'Partícula', 'PRON': 'Pronombre', + 'PROPN': 'Nombre Propio', 'SCONJ': 'Conjunción Subordinante', 'SYM': 'Símbolo', + 'VERB': 'Verbo', 'X': 'Otro', + }, + 'en': { + 'ADJ': 'Adjective', 'ADP': 'Preposition', 'ADV': 'Adverb', 'AUX': 'Auxiliary', + 'CCONJ': 'Coordinating Conjunction', 'DET': 'Determiner', 'INTJ': 'Interjection', + 'NOUN': 'Noun', 'NUM': 'Number', 'PART': 'Particle', 'PRON': 'Pronoun', + 'PROPN': 'Proper Noun', 'SCONJ': 'Subordinating Conjunction', 'SYM': 'Symbol', + 'VERB': 'Verb', 'X': 'Other', + }, + 'fr': { + 'ADJ': 
'Adjectif', 'ADP': 'Préposition', 'ADV': 'Adverbe', 'AUX': 'Auxiliaire', + 'CCONJ': 'Conjonction de Coordination', 'DET': 'Déterminant', 'INTJ': 'Interjection', + 'NOUN': 'Nom', 'NUM': 'Nombre', 'PART': 'Particule', 'PRON': 'Pronom', + 'PROPN': 'Nom Propre', 'SCONJ': 'Conjonction de Subordination', 'SYM': 'Symbole', + 'VERB': 'Verbe', 'X': 'Autre', + } +} + +ENTITY_LABELS = { + 'es': { + "Personas": "lightblue", + "Lugares": "lightcoral", + "Inventos": "lightgreen", + "Fechas": "lightyellow", + "Conceptos": "lightpink" + }, + 'en': { + "People": "lightblue", + "Places": "lightcoral", + "Inventions": "lightgreen", + "Dates": "lightyellow", + "Concepts": "lightpink" + }, + 'fr': { + "Personnes": "lightblue", + "Lieux": "lightcoral", + "Inventions": "lightgreen", + "Dates": "lightyellow", + "Concepts": "lightpink" + } +} + +############################################################################################################## +def perform_semantic_analysis(text, nlp, lang_code): + logger.info(f"Starting semantic analysis for language: {lang_code}") + try: + doc = nlp(text) + + # Conceptos clave y grafo de conceptos + key_concepts = identify_key_concepts(doc) + concept_graph = create_concept_graph(doc, key_concepts) + concept_graph_fig = visualize_concept_graph(concept_graph, lang_code) + #concept_graph_html = fig_to_html(concept_graph_fig) + + # Entidades y grafo de entidades + entities = extract_entities(doc, lang_code) + entity_graph = create_entity_graph(entities) + entity_graph_fig = visualize_entity_graph(entity_graph, lang_code) + #entity_graph_html = fig_to_html(entity_graph_fig) + + logger.info("Semantic analysis completed successfully") + return { + 'doc': doc, + 'key_concepts': key_concepts, + 'concept_graph': concept_graph_fig, + 'entities': entities, + 'entity_graph': entity_graph_fig + } + except Exception as e: + logger.error(f"Error in perform_semantic_analysis: {str(e)}") + raise + +''' +def fig_to_html(fig): + buf = io.BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + img_str = base64.b64encode(buf.getvalue()).decode() + return f'' +''' + + +def identify_key_concepts(doc): + logger.info("Identifying key concepts") + word_freq = Counter([token.lemma_.lower() for token in doc if token.pos_ in ['NOUN', 'VERB'] and not token.is_stop]) + key_concepts = word_freq.most_common(10) + return [(concept, float(freq)) for concept, freq in key_concepts] + + +def create_concept_graph(doc, key_concepts): + G = nx.Graph() + for concept, freq in key_concepts: + G.add_node(concept, weight=freq) + for sent in doc.sents: + sent_concepts = [token.lemma_.lower() for token in sent if token.lemma_.lower() in dict(key_concepts)] + for i, concept1 in enumerate(sent_concepts): + for concept2 in sent_concepts[i+1:]: + if G.has_edge(concept1, concept2): + G[concept1][concept2]['weight'] += 1 + else: + G.add_edge(concept1, concept2, weight=1) + return G + +def visualize_concept_graph(G, lang_code): + fig, ax = plt.subplots(figsize=(12, 8)) + pos = nx.spring_layout(G, k=0.5, iterations=50) + node_sizes = [G.nodes[node]['weight'] * 100 for node in G.nodes()] + nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='lightblue', alpha=0.8, ax=ax) + nx.draw_networkx_labels(G, pos, font_size=10, font_weight="bold", ax=ax) + edge_weights = [G[u][v]['weight'] for u, v in G.edges()] + nx.draw_networkx_edges(G, pos, width=edge_weights, alpha=0.5, ax=ax) + title = { + 'es': "Relaciones entre Conceptos Clave", + 'en': "Key Concept Relations", + 'fr': "Relations entre Concepts Clés" + } + 
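+    # Note: node sizes scale with concept frequency and edge widths with the
+    # sentence co-occurrence counts assigned in create_concept_graph above.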
ax.set_title(title[lang_code], fontsize=16) + ax.axis('off') + plt.tight_layout() + return fig + +def create_entity_graph(entities): + G = nx.Graph() + for entity_type, entity_list in entities.items(): + for entity in entity_list: + G.add_node(entity, type=entity_type) + for i, entity1 in enumerate(entity_list): + for entity2 in entity_list[i+1:]: + G.add_edge(entity1, entity2) + return G + +def visualize_entity_graph(G, lang_code): + fig, ax = plt.subplots(figsize=(12, 8)) + pos = nx.spring_layout(G) + for entity_type, color in ENTITY_LABELS[lang_code].items(): + node_list = [node for node, data in G.nodes(data=True) if data['type'] == entity_type] + nx.draw_networkx_nodes(G, pos, nodelist=node_list, node_color=color, node_size=500, alpha=0.8, ax=ax) + nx.draw_networkx_edges(G, pos, width=1, alpha=0.5, ax=ax) + nx.draw_networkx_labels(G, pos, font_size=8, font_weight="bold", ax=ax) + ax.set_title(f"Relaciones entre Entidades ({lang_code})", fontsize=16) + ax.axis('off') + plt.tight_layout() + return fig + + +################################################################################# +def create_topic_graph(topics, doc): + G = nx.Graph() + for topic in topics: + G.add_node(topic, weight=doc.text.count(topic)) + for i, topic1 in enumerate(topics): + for topic2 in topics[i+1:]: + weight = sum(1 for sent in doc.sents if topic1 in sent.text and topic2 in sent.text) + if weight > 0: + G.add_edge(topic1, topic2, weight=weight) + return G + +def visualize_topic_graph(G, lang_code): + fig, ax = plt.subplots(figsize=(12, 8)) + pos = nx.spring_layout(G) + node_sizes = [G.nodes[node]['weight'] * 100 for node in G.nodes()] + nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='lightgreen', alpha=0.8, ax=ax) + nx.draw_networkx_labels(G, pos, font_size=10, font_weight="bold", ax=ax) + edge_weights = [G[u][v]['weight'] for u, v in G.edges()] + nx.draw_networkx_edges(G, pos, width=edge_weights, alpha=0.5, ax=ax) + ax.set_title(f"Relaciones entre Temas ({lang_code})", fontsize=16) + ax.axis('off') + plt.tight_layout() + return fig + +########################################################################################### +def generate_summary(doc, lang_code): + sentences = list(doc.sents) + summary = sentences[:3] # Toma las primeras 3 oraciones como resumen + return " ".join([sent.text for sent in summary]) + +def extract_entities(doc, lang_code): + entities = defaultdict(list) + for ent in doc.ents: + if ent.label_ in ENTITY_LABELS[lang_code]: + entities[ent.label_].append(ent.text) + return dict(entities) + +def analyze_sentiment(doc, lang_code): + positive_words = sum(1 for token in doc if token.sentiment > 0) + negative_words = sum(1 for token in doc if token.sentiment < 0) + total_words = len(doc) + if positive_words > negative_words: + return "Positivo" + elif negative_words > positive_words: + return "Negativo" + else: + return "Neutral" + +def extract_topics(doc, lang_code): + vectorizer = TfidfVectorizer(stop_words='english', max_features=5) + tfidf_matrix = vectorizer.fit_transform([doc.text]) + feature_names = vectorizer.get_feature_names_out() + return list(feature_names) + +# Asegúrate de que todas las funciones necesarias estén exportadas +__all__ = [ + 'perform_semantic_analysis', + 'identify_key_concepts', + 'create_concept_graph', + 'visualize_concept_graph', + 'create_entity_graph', + 'visualize_entity_graph', + 'generate_summary', + 'extract_entities', + 'analyze_sentiment', + 'create_topic_graph', + 'visualize_topic_graph', + 'extract_topics', + 'ENTITY_LABELS', + 
'POS_COLORS', + 'POS_TRANSLATIONS' +] \ No newline at end of file diff --git a/modules/text_analysis/structure_analysis.py b/modules/text_analysis/structure_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/style_analysis.py b/modules/text_analysis/style_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/thematic_analysis.py b/modules/text_analysis/thematic_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/txt.txt b/modules/text_analysis/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/text_analysis/vocabulary_analysis.py b/modules/text_analysis/vocabulary_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/ui/__init__.py b/modules/ui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/ui/__pycache__/__init__.cpython-311.pyc b/modules/ui/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ac9684e4704cf8e942f2a15e5fea186fe3be7a0 Binary files /dev/null and b/modules/ui/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/ui/__pycache__/ui.cpython-311.pyc b/modules/ui/__pycache__/ui.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcda6c057d448090e4adece4b140c7714344d570 Binary files /dev/null and b/modules/ui/__pycache__/ui.cpython-311.pyc differ diff --git a/modules/ui/txt.txt b/modules/ui/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/ui/ui backUpError_24-9-24.py b/modules/ui/ui backUpError_24-9-24.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ce2b4a89d3c1f9bba42093a838ff9b97f7cc17 --- /dev/null +++ b/modules/ui/ui backUpError_24-9-24.py @@ -0,0 +1,473 @@ +# Importaciones generales +import streamlit as st +from streamlit_player import st_player # Necesitarás instalar esta librería: pip install streamlit-player +from streamlit_float import * +from streamlit_antd_components import * +from streamlit_option_menu import * +from streamlit_chat import * +import logging +import time +from datetime import datetime +import re +import io +from io import BytesIO +import base64 +import matplotlib.pyplot as plt +import plotly.graph_objects as go +import pandas as pd +import numpy as np +from spacy import displacy +import random + +# Configuración del logger +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Importaciones locales +from translations import get_translations + +# Importaciones locales +from ..studentact.student_activities_v2 import display_student_progress + +# Importaciones directas de los módulos necesarios +from ..auth.auth import authenticate_user, register_user + + +from ..database.database_oldFromV2 import ( + get_student_data, + store_application_request, + store_morphosyntax_result, + store_semantic_result, + store_discourse_analysis_result, + store_chat_history, + create_admin_user, + create_student_user, + store_user_feedback +) + +from 
..admin.admin_ui import admin_page + +from ..morphosyntax.morphosyntax_interface import display_morphosyntax_interface + +from ..semantic.semantic_interface_68ok import display_semantic_interface + +from ..discourse.discourse_interface import display_discourse_interface + +# Nueva importación para semantic_float_init +#from ..semantic.semantic_float import semantic_float_init +from ..semantic.semantic_float68ok import semantic_float_init + + +############### Iniciar sesión ###################### + + +def initialize_session_state(): + if 'initialized' not in st.session_state: + st.session_state.clear() + st.session_state.initialized = True + st.session_state.logged_in = False + st.session_state.page = 'login' + st.session_state.username = None + st.session_state.role = None + st.session_state.lang_code = 'es' # Idioma por defecto + +def main(): + logger.info(f"Entrando en main() - Página actual: {st.session_state.page}") + + if 'nlp_models' not in st.session_state: + st.error("Los modelos NLP no están inicializados. Por favor, reinicie la aplicación.") + return + + semantic_float_init() + + if st.session_state.page == 'login': + login_register_page() + elif st.session_state.page == 'admin': + logger.info("Mostrando página de admin") + admin_page() + elif st.session_state.page == 'user': + user_page() + else: + logger.warning(f"Página no reconocida: {st.session_state.page}") + st.error(f"Página no reconocida: {st.session_state.page}") + + logger.info(f"Saliendo de main() - Estado final de la sesión: {st.session_state}") + +############### Después de iniciar sesión ###################### + +def user_page(): + logger.info(f"Entrando en user_page para el usuario: {st.session_state.username}") + + if 'user_data' not in st.session_state or time.time() - st.session_state.get('last_data_fetch', 0) > 60: + with st.spinner("Cargando tus datos..."): + try: + st.session_state.user_data = get_student_data(st.session_state.username) + st.session_state.last_data_fetch = time.time() + except Exception as e: + logger.error(f"Error al obtener datos del usuario: {str(e)}") + st.error("Hubo un problema al cargar tus datos. Por favor, intenta recargar la página.") + return + + logger.info(f"Idioma actual: {st.session_state.lang_code}") + logger.info(f"Modelos NLP cargados: {'nlp_models' in st.session_state}") + + languages = {'Español': 'es', 'English': 'en', 'Français': 'fr'} + + if 'lang_code' not in st.session_state: + st.session_state.lang_code = 'es' # Idioma por defecto + elif not isinstance(st.session_state.lang_code, str) or st.session_state.lang_code not in ['es', 'en', 'fr']: + logger.warning(f"Invalid lang_code: {st.session_state.lang_code}. Setting to default 'es'") + st.session_state.lang_code = 'es' + + # Obtener traducciones + t = get_translations(st.session_state.lang_code) + + # Estilos CSS personalizados (mantener los estilos existentes) + st.markdown(""" + + """, unsafe_allow_html=True) + + # Crear un contenedor para la barra superior + with st.container(): + col1, col2, col3 = st.columns([2, 2, 1]) + with col1: + st.markdown(f"

<div>{t['welcome']}, {st.session_state.username}</div>

", unsafe_allow_html=True) + with col2: + selected_lang = st.selectbox( + t['select_language'], + list(languages.keys()), + index=list(languages.values()).index(st.session_state.lang_code), + key=f"language_selector_{st.session_state.username}_{st.session_state.lang_code}" + ) + new_lang_code = languages[selected_lang] + if st.session_state.lang_code != new_lang_code: + st.session_state.lang_code = new_lang_code + st.rerun() # Esto recargará la página con el nuevo idioma + with col3: + if st.button(t['logout'], key=f"logout_button_{st.session_state.username}_{st.session_state.lang_code}"): + # Implementación temporal de logout + for key in list(st.session_state.keys()): + del st.session_state[key] + st.rerun() + + st.markdown("---") + + # Mostrar resumen de análisis + #st.subheader(t['analysis_summary']) + #col1, col2, col3 = st.columns(3) + #col1.metric(t['morpho_analyses'], len(st.session_state.user_data['morphosyntax_analyses'])) + #col2.metric(t['semantic_analyses'], len(st.session_state.user_data['semantic_analyses'])) + #col3.metric(t['discourse_analyses'], len(st.session_state.user_data['discourse_analyses'])) + + + # Opción para exportar datos + #if st.button(t['export_all_analyses']): + # st.info(t['export_in_progress']) + # Aquí iría la llamada a export_data cuando esté implementada + # export_data(st.session_state.user_data, t) + + # Crear las pestañas + tabs = st.tabs([ + t['morpho_tab'], + t['semantic_tab'], + t['discourse_tab'], + t['activities_tab'], + t['feedback_tab'] + ]) + + # Usar las pestañas creadas + for i, (tab, func) in enumerate(zip(tabs, [ + display_morphosyntax_interface, + display_semantic_interface, + display_discourse_interface, + display_student_progress, + display_feedback_form + ])): + with tab: + try: + if i < 5: # Para las primeras tres pestañas (análisis) + func(st.session_state.lang_code, st.session_state.nlp_models, t, st.session_state.user_data) + elif i == 3: # Para la pestaña de progreso del estudiante + func(st.session_state.username, st.session_state.lang_code, t, st.session_state.user_data) + else: # Para la pestaña de feedback + func(st.session_state.lang_code, t) + except Exception as e: + st.error(f"Error al cargar la pestaña: {str(e)}") + logger.error(f"Error en la pestaña {i}: {str(e)}", exc_info=True) + + logger.debug(f"Translations loaded: {t}") # Log para depuración + logger.info("Finalizada la renderización de user_page") + + + +##################################### + +def login_register_page(): + logger.info("Renderizando página de login/registro") + st.title("AIdeaText") + st.write("Bienvenido. 
Por favor, inicie sesión o regístrese.") + + left_column, right_column = st.columns([1, 3]) + + with left_column: + tab1, tab2 = st.tabs(["Iniciar Sesión", "Registrarse"]) + + with tab1: + login_form() + + with tab2: + register_form() + + with right_column: + display_videos_and_info() + + +################################################### +def login_form(): + with st.form("login_form"): + username = st.text_input("Correo electrónico") + password = st.text_input("Contraseña", type="password") + submit_button = st.form_submit_button("Iniciar Sesión") + + if submit_button: + success, role = authenticate_user(username, password) + if success: + st.session_state.logged_in = True + st.session_state.username = username + st.session_state.role = role + st.session_state.page = 'admin' if role == 'Administrador' else 'user' + st.rerun() + else: + st.error("Credenciales incorrectas") + + +################################################### +def register_form(): + st.header("Solicitar prueba de la aplicación") + + name = st.text_input("Nombre completo") + email = st.text_input("Correo electrónico institucional") + institution = st.text_input("Institución") + role = st.selectbox("Rol", ["Estudiante", "Profesor", "Investigador", "Otro"]) + reason = st.text_area("¿Por qué estás interesado en probar AIdeaText?") + + if st.button("Enviar solicitud"): + if not name or not email or not institution or not reason: + st.error("Por favor, completa todos los campos.") + elif not is_institutional_email(email): + st.error("Por favor, utiliza un correo electrónico institucional.") + else: + success = store_application_request(name, email, institution, role, reason) + if success: + st.success("Tu solicitud ha sido enviada. Te contactaremos pronto.") + else: + st.error("Hubo un problema al enviar tu solicitud. Por favor, intenta de nuevo más tarde.") + + + +################################################### +def is_institutional_email(email): + forbidden_domains = ['gmail.com', 'hotmail.com', 'yahoo.com', 'outlook.com'] + return not any(domain in email.lower() for domain in forbidden_domains) + + +################################################### +def display_videos_and_info(): + st.header("Videos: pitch, demos, entrevistas, otros") + + videos = { + "Presentación en PyCon Colombia, Medellín, 2024": "https://www.youtube.com/watch?v=Jn545-IKx5Q", + "Presentación fundación Ser Maaestro": "https://www.youtube.com/watch?v=imc4TI1q164", + "Pitch IFE Explora": "https://www.youtube.com/watch?v=Fqi4Di_Rj_s", + "Entrevista Dr. 
Guillermo Ruíz": "https://www.youtube.com/watch?v=_ch8cRja3oc", + "Demo versión desktop": "https://www.youtube.com/watch?v=nP6eXbog-ZY" + } + + selected_title = st.selectbox("Selecciona un video tutorial:", list(videos.keys())) + + if selected_title in videos: + try: + st_player(videos[selected_title]) + except Exception as e: + st.error(f"Error al cargar el video: {str(e)}") + + st.markdown(""" + ## Novedades de la versión actual + - Nueva función de análisis semántico + - Soporte para múltiples idiomas + - Interfaz mejorada para una mejor experiencia de usuario + """) + +def display_feedback_form(lang_code, t): + logging.info(f"display_feedback_form called with lang_code: {lang_code}") + + st.header(t['title']) + + name = st.text_input(t['name'], key=f"feedback_name_{lang_code}") + email = st.text_input(t['email'], key=f"feedback_email_{lang_code}") + feedback = st.text_area(t['feedback'], key=f"feedback_text_{lang_code}") + + if st.button(t['submit'], key=f"feedback_submit_{lang_code}"): + if name and email and feedback: + if store_user_feedback(st.session_state.username, name, email, feedback): + st.success(t['success']) + else: + st.error(t['error']) + else: + st.warning("Por favor, completa todos los campos.") + +''' +def display_student_progress(username, lang_code, t): + student_data = get_student_data(username) + + if student_data is None or len(student_data['entries']) == 0: + st.warning("No se encontraron datos para este estudiante.") + st.info("Intenta realizar algunos análisis de texto primero.") + return + + st.title(f"Progreso de {username}") + + with st.expander("Resumen de Actividades y Progreso", expanded=True): + # Resumen de actividades + total_entries = len(student_data['entries']) + st.write(f"Total de análisis realizados: {total_entries}") + + # Gráfico de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + + fig, ax = plt.subplots() + analysis_counts.plot(kind='bar', ax=ax) + ax.set_title("Tipos de análisis realizados") + ax.set_xlabel("Tipo de análisis") + ax.set_ylabel("Cantidad") + st.pyplot(fig) + + # Progreso a lo largo del tiempo + dates = [datetime.fromisoformat(entry['timestamp']) for entry in student_data['entries']] + analysis_counts = pd.Series(dates).value_counts().sort_index() + + fig, ax = plt.subplots() + analysis_counts.plot(kind='line', ax=ax) + ax.set_title("Análisis realizados a lo largo del tiempo") + ax.set_xlabel("Fecha") + ax.set_ylabel("Cantidad de análisis") + st.pyplot(fig) + +########################################################## + with st.expander("Histórico de Análisis Morfosintácticos"): + morphosyntax_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'morphosyntax'] + for entry in morphosyntax_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + if entry['arc_diagrams']: + st.write(entry['arc_diagrams'][0], unsafe_allow_html=True) + + + ########################################################## + with st.expander("Histórico de Análisis Semánticos"): + semantic_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'semantic'] + for entry in semantic_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave + if 'key_concepts' in entry: + st.write("Conceptos clave:") + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts']]) + #st.write("Conceptos clave:") + 
#st.write(concepts_str) + st.markdown(f"
<div>{concepts_str}</div>
", unsafe_allow_html=True) + + # Mostrar gráfico + if 'graph' in entry: + try: + img_bytes = base64.b64decode(entry['graph']) + st.image(img_bytes, caption="Gráfico de relaciones conceptuales") + except Exception as e: + st.error(f"No se pudo mostrar el gráfico: {str(e)}") + +########################################################## + with st.expander("Histórico de Análisis Discursivos"): + discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for entry in discourse_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave para ambos documentos + if 'key_concepts1' in entry: + concepts_str1 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts1']]) + st.write("Conceptos clave del documento 1:") + #st.write(concepts_str1) + st.markdown(f"
<div>{concepts_str1}</div>
", unsafe_allow_html=True) + + if 'key_concepts2' in entry: + concepts_str2 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts2']]) + st.write("Conceptos clave del documento 2:") + #st.write(concepts_str2) + st.markdown(f"
<div>{concepts_str2}</div>
", unsafe_allow_html=True) + + try: + if 'combined_graph' in entry and entry['combined_graph']: + img_bytes = base64.b64decode(entry['combined_graph']) + st.image(img_bytes) + elif 'graph1' in entry and 'graph2' in entry: + col1, col2 = st.columns(2) + with col1: + if entry['graph1']: + img_bytes1 = base64.b64decode(entry['graph1']) + st.image(img_bytes1) + with col2: + if entry['graph2']: + img_bytes2 = base64.b64decode(entry['graph2']) + st.image(img_bytes2) + else: + st.write("No se encontraron gráficos para este análisis.") + except Exception as e: + st.error(f"No se pudieron mostrar los gráficos: {str(e)}") + st.write("Datos de los gráficos (para depuración):") + if 'graph1' in entry: + st.write("Graph 1:", entry['graph1'][:100] + "...") + if 'graph2' in entry: + st.write("Graph 2:", entry['graph2'][:100] + "...") + if 'combined_graph' in entry: + st.write("Combined Graph:", entry['combined_graph'][:100] + "...") + +########################################################## + with st.expander("Histórico de Conversaciones con el ChatBot"): + if 'chat_history' in student_data: + for i, chat in enumerate(student_data['chat_history']): + st.subheader(f"Conversación {i+1} - {chat['timestamp']}") + for message in chat['messages']: + if message['role'] == 'user': + st.write("Usuario: " + message['content']) + else: + st.write("Asistente: " + message['content']) + st.write("---") + else: + st.write("No se encontraron conversaciones con el ChatBot.") + + # Añadir logs para depuración + if st.checkbox("Mostrar datos de depuración"): + st.write("Datos del estudiante (para depuración):") + st.json(student_data) + + +''' + +# Definición de __all__ para especificar qué se exporta +__all__ = ['main', 'login_register_page', 'initialize_session_state'] + +# Bloque de ejecución condicional +if __name__ == "__main__": + main() diff --git a/modules/ui/ui.py b/modules/ui/ui.py new file mode 100644 index 0000000000000000000000000000000000000000..cc326b7f57627b9815546d5db4a06ec668705f94 --- /dev/null +++ b/modules/ui/ui.py @@ -0,0 +1,313 @@ +# Importaciones generales +import streamlit as st +from streamlit_player import st_player +import logging +from datetime import datetime, timezone +from dateutil.parser import parse + +# Configuración del logger +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Importaciones locales +from session_state import initialize_session_state, logout + +from translations import get_translations + +from ..studentact.student_activities import display_student_progress + +from ..auth.auth import authenticate_user, authenticate_student, authenticate_admin + +from ..admin.admin_ui import admin_page + +##Importaciones desde la configuración de bases datos ####### + +from ..database.sql_db import ( + store_application_request, + store_student_feedback, + store_application_request +) + +from ..database.mongo_db import ( + get_collection, + insert_document, + find_documents, + update_document, + delete_document +) + +from ..database.morphosintax_mongo_db import ( + store_student_morphosyntax_result, + get_student_morphosyntax_analysis, + update_student_morphosyntax_analysis, + delete_student_morphosyntax_analysis, + get_student_morphosyntax_data +) + +from ..morphosyntax.morphosyntax_interface import ( + display_morphosyntax_interface +) + +def main(): + logger.info(f"Entrando en main() - Página actual: {st.session_state.page}") + + if 'nlp_models' not in st.session_state: + logger.error("Los modelos NLP no están inicializados.") + st.error("Los modelos 
NLP no están inicializados. Por favor, reinicie la aplicación.") + return + + lang_code = st.session_state.get('lang_code', 'es') + t = get_translations(lang_code) + + logger.info(f"Página actual antes de la lógica de enrutamiento: {st.session_state.page}") + + if st.session_state.get('logged_out', False): + st.session_state.logged_out = False + st.session_state.page = 'login' + st.rerun() + + if not st.session_state.get('logged_in', False): + logger.info("Usuario no ha iniciado sesión. Mostrando página de login/registro") + login_register_page(lang_code, t) + elif st.session_state.page == 'user': + if st.session_state.role == 'Administrador': + logger.info("Redirigiendo a la página de administrador") + st.session_state.page = 'Admin' + st.rerun() + else: + logger.info("Renderizando página de usuario") + user_page(lang_code, t) + elif st.session_state.page == "Admin": + logger.info("Renderizando página de administrador") + admin_page() + else: + logger.error(f"Página no reconocida: {st.session_state.page}") + st.error(t.get('unrecognized_page', 'Página no reconocida')) + + logger.info(f"Saliendo de main() - Estado final de la sesión: {st.session_state}") + + +def login_register_page(lang_code, t): + st.title("AIdeaText") + st.write(t.get("welcome_message", "Bienvenido. Por favor, inicie sesión o regístrese.")) + + left_column, right_column = st.columns([1, 3]) + + with left_column: + tab1, tab2 = st.tabs([t.get("login", "Iniciar Sesión"), t.get("register", "Registrarse")]) + + with tab1: + login_form(lang_code, t) + + with tab2: + register_form(lang_code, t) + + with right_column: + display_videos_and_info(lang_code, t) + +def login_form(lang_code, t): + with st.form("login_form"): + username = st.text_input(t.get("email", "Correo electrónico")) + password = st.text_input(t.get("password", "Contraseña"), type="password") + submit_button = st.form_submit_button(t.get("login", "Iniciar Sesión")) + + if submit_button: + success, role = authenticate_user(username, password) + if success: + st.session_state.logged_in = True + st.session_state.username = username + st.session_state.role = role + if role == 'Administrador': + st.session_state.page = 'Admin' + else: + st.session_state.page = 'user' + logger.info(f"Usuario autenticado: {username}, Rol: {role}") + st.rerun() + else: + st.error(t.get("invalid_credentials", "Credenciales incorrectas")) + +def register_form(lang_code, t): + st.header(t.get("request_trial", "Solicitar prueba de la aplicación")) + + name = st.text_input(t.get("name", "Nombre")) + lastname = st.text_input(t.get("lastname", "Apellidos")) + institution = st.text_input(t.get("institution", "Institución")) + current_role = st.selectbox(t.get("current_role", "Rol en la institución donde labora"), + [t.get("professor", "Profesor"), t.get("student", "Estudiante"), t.get("administrative", "Administrativo")]) + desired_role = st.selectbox(t.get("desired_role", "Rol con el que desea registrarse en AIdeaText"), + [t.get("professor", "Profesor"), t.get("student", "Estudiante")]) + email = st.text_input(t.get("institutional_email", "Correo electrónico de su institución")) + reason = st.text_area(t.get("interest_reason", "¿Por qué estás interesado en probar AIdeaText?")) + + if st.button(t.get("submit_application", "Enviar solicitud")): + logger.info(f"Attempting to submit application for {email}") + logger.debug(f"Form data: name={name}, lastname={lastname}, email={email}, institution={institution}, current_role={current_role}, desired_role={desired_role}, reason={reason}") + + if not 
name or not lastname or not email or not institution or not reason: + logger.warning("Incomplete form submission") + st.error(t.get("complete_all_fields", "Por favor, completa todos los campos.")) + elif not is_institutional_email(email): + logger.warning(f"Non-institutional email used: {email}") + st.error(t.get("use_institutional_email", "Por favor, utiliza un correo electrónico institucional.")) + else: + logger.info(f"Attempting to store application for {email}") + success = store_application_request(name, lastname, email, institution, current_role, desired_role, reason) + if success: + st.success(t.get("application_sent", "Tu solicitud ha sido enviada. Te contactaremos pronto.")) + logger.info(f"Application request stored successfully for {email}") + else: + st.error(t.get("application_error", "Hubo un problema al enviar tu solicitud. Por favor, intenta de nuevo más tarde.")) + logger.error(f"Failed to store application request for {email}") + +def is_institutional_email(email): + forbidden_domains = ['gmail.com', 'hotmail.com', 'yahoo.com', 'outlook.com'] + return not any(domain in email.lower() for domain in forbidden_domains) + +def display_videos_and_info(lang_code, t): + st.header("Videos: pitch, demos, entrevistas, otros") + + videos = { + "Presentación en PyCon Colombia, Medellín, 2024": "https://www.youtube.com/watch?v=Jn545-IKx5Q", + "Presentación fundación Ser Maaestro": "https://www.youtube.com/watch?v=imc4TI1q164", + } + + selected_title = st.selectbox("Selecciona un video tutorial:", list(videos.keys())) + + if selected_title in videos: + try: + st_player(videos[selected_title]) + except Exception as e: + st.error(f"Error al cargar el video: {str(e)}") + + st.markdown(""" + ## Novedades de la versión actual + - Interfaz mejorada para una mejor experiencia de usuario + - Optimización del análisis morfosintáctico + - Soporte para múltiples idiomas + """) + +#Después de iniciar sesión + +def user_page(lang_code, t): + logger.info(f"Entrando en user_page para el estudiante: {st.session_state.username}") + + current_time = datetime.now(timezone.utc) + last_fetch_time = st.session_state.get('last_data_fetch') + + if last_fetch_time: + last_fetch_time = parse(last_fetch_time) + else: + last_fetch_time = datetime.min.replace(tzinfo=timezone.utc) + + # Comprobar si necesitamos recargar los datos del usuario + if 'user_data' not in st.session_state or (current_time - last_fetch_time).total_seconds() > 60: + with st.spinner(t.get('loading_data', "Cargando tus datos...")): + try: + st.session_state.user_data = get_student_morphosyntax_data(st.session_state.username) + st.session_state.last_data_fetch = current_time.isoformat() + except Exception as e: + logger.error(f"Error al obtener datos del usuario: {str(e)}") + st.error(t.get('data_load_error', "Hubo un problema al cargar tus datos. Por favor, intenta recargar la página.")) + return + + logger.info(f"Idioma actual: {st.session_state.lang_code}") + logger.info(f"Modelos NLP cargados: {'nlp_models' in st.session_state}") + + languages = {'Español': 'es', 'English': 'en', 'Français': 'fr'} + + # Estilos CSS personalizados (mantener los estilos existentes) + st.markdown(""" + + """, unsafe_allow_html=True) + + with st.container(): + col1, col2, col3 = st.columns([2, 2, 1]) + with col1: + st.markdown(f"

<div>{t['welcome']}, {st.session_state.username}</div>

", unsafe_allow_html=True) + with col2: + selected_lang = st.selectbox( + t['select_language'], + list(languages.keys()), + index=list(languages.values()).index(st.session_state.lang_code), + key=f"language_selector_{st.session_state.username}_{st.session_state.lang_code}" + ) + new_lang_code = languages[selected_lang] + if st.session_state.lang_code != new_lang_code: + st.session_state.lang_code = new_lang_code + st.rerun() + with col3: + if st.button(t['logout'], key=f"logout_button_{st.session_state.username}_{st.session_state.lang_code}"): + logout() + st.rerun() + + # Reinicializar el estado de la sesión + initialize_session_state() + + # Recargar la aplicación + st.rerun() + + st.markdown("---") + + tabs = st.tabs([ + t.get('morpho_tab', 'Análisis Morfosintáctico'), + t.get('activities_tab', 'Mis Actividades'), + t.get('feedback_tab', 'Formulario de Comentarios') + ]) + + with tabs[0]: + display_morphosyntax_interface(st.session_state.lang_code, st.session_state.nlp_models, t) + + with tabs[1]: + if 'user_data' in st.session_state and st.session_state.user_data: + display_student_progress(st.session_state.username, st.session_state.lang_code, t) + else: + st.warning(t.get('no_data_warning', 'No se encontraron datos para este estudiante.')) + + with tabs[2]: + display_feedback_form(st.session_state.lang_code, t) + + # Información de depuración + with st.expander("Debug Info"): + st.write(f"Página actual: {st.session_state.page}") + st.write(f"Usuario: {st.session_state.get('username', 'No logueado')}") + st.write(f"Rol: {st.session_state.get('role', 'No definido')}") + st.write(f"Idioma: {st.session_state.lang_code}") + st.write(f"Última actualización de datos: {st.session_state.get('last_data_fetch', 'Nunca')}") + +def display_feedback_form(lang_code, t): + logging.info(f"display_feedback_form called with lang_code: {lang_code}") + + st.header(t['feedback_title']) + + name = st.text_input(t['name']) + email = st.text_input(t['email']) + feedback = st.text_area(t['feedback']) + + if st.button(t['submit']): + if name and email and feedback: + if store_student_feedback(st.session_state.username, name, email, feedback): + st.success(t['feedback_success']) + else: + st.error(t['feedback_error']) + else: + st.warning(t['complete_all_fields']) + +# Definición de __all__ para especificar qué se exporta +__all__ = ['main', 'login_register_page', 'initialize_session_state'] + +# Bloque de ejecución condicional +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/ui/ui_BackUp-19-9-2024.py b/modules/ui/ui_BackUp-19-9-2024.py new file mode 100644 index 0000000000000000000000000000000000000000..1a8e1fb7c1daf723d04cbd2ca1fa301b4dd78091 --- /dev/null +++ b/modules/ui/ui_BackUp-19-9-2024.py @@ -0,0 +1,1160 @@ +# Importaciones generales +import sys +import streamlit as st +import re +import io +from io import BytesIO +import base64 +import matplotlib.pyplot as plt +import plotly.graph_objects as go +import pandas as pd +import numpy as np +import time +from datetime import datetime +from streamlit_player import st_player # Necesitarás instalar esta librería: pip install streamlit-player +from spacy import displacy +import logging +import random + +###################################################### +# Configuración del logger +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +###################################################### +#imporraciones locales de traducción +from translations import get_translations + 
+###################################################### +# Importaciones locales +from ..email.email import send_email_notification + +###################################################### +# Importaciones locales de autenticación y base de datos +from ..auth.auth import ( + authenticate_user, + register_user +) + +###################################################### +from ..database.database_oldFromV2 import ( + create_admin_user, + create_student_user, + get_user, + get_student_data, + store_file_contents, #gestión archivos + retrieve_file_contents, #gestión archivos + get_user_files, #gestión archivos + delete_file, # #gestión archivos + store_application_request, # form + store_user_feedback, # form + store_morphosyntax_result, + store_semantic_result, + store_discourse_analysis_result, + store_chat_history, + export_analysis_and_chat +) + +###################################################### +# Importaciones locales de uiadmin +from ..admin.admin_ui import admin_page + +###################################################### +# Importaciones locales funciones de análisis +from ..text_analysis.morpho_analysis import ( + generate_arc_diagram, + get_repeated_words_colors, + highlight_repeated_words, + POS_COLORS, + POS_TRANSLATIONS, + perform_advanced_morphosyntactic_analysis +) + +###################################################### +from ..text_analysis.semantic_analysis import ( + #visualize_semantic_relations, + perform_semantic_analysis, + create_concept_graph, + visualize_concept_graph +) + +###################################################### +from ..text_analysis.discourse_analysis import ( + perform_discourse_analysis, + display_discourse_analysis_results +) + +###################################################### +from ..chatbot.chatbot import ( + initialize_chatbot, + process_morphosyntactic_input, + process_semantic_input, + process_discourse_input, + process_chat_input, + get_connectors, + #handle_semantic_commands, + generate_topics_visualization, + extract_topics, + get_semantic_chatbot_response +) + +#####################-- Funciones de inicialización y configuración--- ############################################################################## +def initialize_session_state(): + if 'initialized' not in st.session_state: + st.session_state.clear() + st.session_state.initialized = True + st.session_state.logged_in = False + st.session_state.page = 'login' + st.session_state.username = None + st.session_state.role = None + +def main(): + initialize_session_state() + + print(f"Página actual: {st.session_state.page}") + print(f"Rol del usuario: {st.session_state.role}") + + if st.session_state.page == 'login': + login_register_page() + elif st.session_state.page == 'admin': + print("Intentando mostrar página de admin") + admin_page() + elif st.session_state.page == 'user': + user_page() + else: + print(f"Página no reconocida: {st.session_state.page}") + + print(f"Estado final de la sesión: {st.session_state}") + +#############################--- # Funciones de autenticación y registro --- ##################################################################### +def login_register_page(): + st.title("AIdeaText") + + left_column, right_column = st.columns([1, 3]) + + with left_column: + tab1, tab2 = st.tabs(["Iniciar Sesión", "Registrarse"]) + + with tab1: + login_form() + + with tab2: + register_form() + + with right_column: + display_videos_and_info() + +def login_form(): + with st.form("login_form"): + username = st.text_input("Correo electrónico") + password = 
st.text_input("Contraseña", type="password") + submit_button = st.form_submit_button("Iniciar Sesión") + + if submit_button: + success, role = authenticate_user(username, password) + if success: + st.session_state.logged_in = True + st.session_state.username = username + st.session_state.role = role + st.session_state.page = 'admin' if role == 'Administrador' else 'user' + st.rerun() + else: + st.error("Credenciales incorrectas") + +def register_form(): + st.header("Solicitar prueba de la aplicación") + + name = st.text_input("Nombre completo") + email = st.text_input("Correo electrónico institucional") + institution = st.text_input("Institución") + role = st.selectbox("Rol", ["Estudiante", "Profesor", "Investigador", "Otro"]) + reason = st.text_area("¿Por qué estás interesado en probar AIdeaText?") + + if st.button("Enviar solicitud"): + logger.info(f"Attempting to submit application for {email}") + logger.debug(f"Form data: name={name}, email={email}, institution={institution}, role={role}, reason={reason}") + + if not name or not email or not institution or not reason: + logger.warning("Incomplete form submission") + st.error("Por favor, completa todos los campos.") + elif not is_institutional_email(email): + logger.warning(f"Non-institutional email used: {email}") + st.error("Por favor, utiliza un correo electrónico institucional.") + else: + logger.info(f"Attempting to store application for {email}") + success = store_application_request(name, email, institution, role, reason) + if success: + st.success("Tu solicitud ha sido enviada. Te contactaremos pronto.") + logger.info(f"Application request stored successfully for {email}") + else: + st.error("Hubo un problema al enviar tu solicitud. Por favor, intenta de nuevo más tarde.") + logger.error(f"Failed to store application request for {email}") + +def is_institutional_email(email): + forbidden_domains = ['gmail.com', 'hotmail.com', 'yahoo.com', 'outlook.com'] + return not any(domain in email.lower() for domain in forbidden_domains) + +###########################################--- Funciones de interfaz general --- ###################################################### + +def user_page(): + # Asumimos que el idioma seleccionado está almacenado en st.session_state.lang_code + # Si no está definido, usamos 'es' como valor predeterminado + t = get_translations(lang_code) + + st.title(t['welcome']) + st.write(f"{t['hello']}, {st.session_state.username}") + + # Dividir la pantalla en dos columnas + col1, col2 = st.columns(2) + + with col1: + st.subheader(t['chat_title']) + display_chatbot_interface(lang_code) + + with col2: + st.subheader(t['results_title']) + if 'current_analysis' in st.session_state and st.session_state.current_analysis is not None: + display_analysis_results(st.session_state.current_analysis, lang_code) + if st.button(t['export_button']): + if export_analysis_and_chat(st.session_state.username, st.session_state.current_analysis, st.session_state.messages): + st.success(t['export_success']) + else: + st.error(t['export_error']) + else: + st.info(t['no_analysis']) + +def admin_page(): + st.title("Panel de Administración") + st.write(f"Bienvenida, {st.session_state.username}") + + st.header("Crear Nuevo Usuario Estudiante") + new_username = st.text_input("Correo electrónico del nuevo usuario", key="admin_new_username") + new_password = st.text_input("Contraseña", type="password", key="admin_new_password") + if st.button("Crear Usuario", key="admin_create_user"): + if create_student_user(new_username, new_password): + 
st.success(f"Usuario estudiante {new_username} creado exitosamente") + else: + st.error("Error al crear el usuario estudiante") + + # Aquí puedes añadir más funcionalidades para el panel de administración + +def display_videos_and_info(): + st.header("Videos: pitch, demos, entrevistas, otros") + + videos = { + "Presentación en PyCon Colombia, Medellín, 2024": "https://www.youtube.com/watch?v=Jn545-IKx5Q", + "Presentación fundación Ser Maaestro": "https://www.youtube.com/watch?v=imc4TI1q164", + "Pitch IFE Explora": "https://www.youtube.com/watch?v=Fqi4Di_Rj_s", + "Entrevista Dr. Guillermo Ruíz": "https://www.youtube.com/watch?v=_ch8cRja3oc", + "Demo versión desktop": "https://www.youtube.com/watch?v=nP6eXbog-ZY" + } + + selected_title = st.selectbox("Selecciona un video tutorial:", list(videos.keys())) + + if selected_title in videos: + try: + st_player(videos[selected_title]) + except Exception as e: + st.error(f"Error al cargar el video: {str(e)}") + + st.markdown(""" + ## Novedades de la versión actual + - Nueva función de análisis semántico + - Soporte para múltiples idiomas + - Interfaz mejorada para una mejor experiencia de usuario + """) + +def display_feedback_form(lang_code, t): + logging.info(f"display_feedback_form called with lang_code: {lang_code}") + + st.header(t['title']) + + name = st.text_input(t['name'], key=f"feedback_name_{lang_code}") + email = st.text_input(t['email'], key=f"feedback_email_{lang_code}") + feedback = st.text_area(t['feedback'], key=f"feedback_text_{lang_code}") + + if st.button(t['submit'], key=f"feedback_submit_{lang_code}"): + if name and email and feedback: + if store_user_feedback(st.session_state.username, name, email, feedback): + st.success(t['success']) + else: + st.error(t['error']) + else: + st.warning("Por favor, completa todos los campos.") + +def display_student_progress(username, lang_code, t): + student_data = get_student_data(username) + + if student_data is None or len(student_data['entries']) == 0: + st.warning("No se encontraron datos para este estudiante.") + st.info("Intenta realizar algunos análisis de texto primero.") + return + + st.title(f"Progreso de {username}") + + with st.expander("Resumen de Actividades y Progreso", expanded=True): + # Resumen de actividades + total_entries = len(student_data['entries']) + st.write(f"Total de análisis realizados: {total_entries}") + + # Gráfico de tipos de análisis + analysis_types = [entry['analysis_type'] for entry in student_data['entries']] + analysis_counts = pd.Series(analysis_types).value_counts() + + fig, ax = plt.subplots() + analysis_counts.plot(kind='bar', ax=ax) + ax.set_title("Tipos de análisis realizados") + ax.set_xlabel("Tipo de análisis") + ax.set_ylabel("Cantidad") + st.pyplot(fig) + + # Progreso a lo largo del tiempo + dates = [datetime.fromisoformat(entry['timestamp']) for entry in student_data['entries']] + analysis_counts = pd.Series(dates).value_counts().sort_index() + + fig, ax = plt.subplots() + analysis_counts.plot(kind='line', ax=ax) + ax.set_title("Análisis realizados a lo largo del tiempo") + ax.set_xlabel("Fecha") + ax.set_ylabel("Cantidad de análisis") + st.pyplot(fig) + +########################################################## + with st.expander("Histórico de Análisis Morfosintácticos"): + morphosyntax_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'morphosyntax'] + for entry in morphosyntax_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + if entry['arc_diagrams']: + st.write(entry['arc_diagrams'][0], 
unsafe_allow_html=True) + + + ########################################################## + with st.expander("Histórico de Análisis Semánticos"): + semantic_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'semantic'] + for entry in semantic_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave + if 'key_concepts' in entry: + st.write("Conceptos clave:") + concepts_str = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts']]) + #st.write("Conceptos clave:") + #st.write(concepts_str) + st.markdown(f"
<div>{concepts_str}</div>
", unsafe_allow_html=True) + + # Mostrar gráfico + if 'graph' in entry: + try: + img_bytes = base64.b64decode(entry['graph']) + st.image(img_bytes, caption="Gráfico de relaciones conceptuales") + except Exception as e: + st.error(f"No se pudo mostrar el gráfico: {str(e)}") + +########################################################## + with st.expander("Histórico de Análisis Discursivos"): + discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for entry in discourse_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave para ambos documentos + if 'key_concepts1' in entry: + concepts_str1 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts1']]) + st.write("Conceptos clave del documento 1:") + #st.write(concepts_str1) + st.markdown(f"
<div>{concepts_str1}</div>
", unsafe_allow_html=True) + + if 'key_concepts2' in entry: + concepts_str2 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts2']]) + st.write("Conceptos clave del documento 2:") + #st.write(concepts_str2) + st.markdown(f"
<div>{concepts_str2}</div>
", unsafe_allow_html=True) + + try: + if 'combined_graph' in entry and entry['combined_graph']: + img_bytes = base64.b64decode(entry['combined_graph']) + st.image(img_bytes) + elif 'graph1' in entry and 'graph2' in entry: + col1, col2 = st.columns(2) + with col1: + if entry['graph1']: + img_bytes1 = base64.b64decode(entry['graph1']) + st.image(img_bytes1) + with col2: + if entry['graph2']: + img_bytes2 = base64.b64decode(entry['graph2']) + st.image(img_bytes2) + else: + st.write("No se encontraron gráficos para este análisis.") + except Exception as e: + st.error(f"No se pudieron mostrar los gráficos: {str(e)}") + st.write("Datos de los gráficos (para depuración):") + if 'graph1' in entry: + st.write("Graph 1:", entry['graph1'][:100] + "...") + if 'graph2' in entry: + st.write("Graph 2:", entry['graph2'][:100] + "...") + if 'combined_graph' in entry: + st.write("Combined Graph:", entry['combined_graph'][:100] + "...") + +########################################################## + with st.expander("Histórico de Conversaciones con el ChatBot"): + if 'chat_history' in student_data: + for i, chat in enumerate(student_data['chat_history']): + st.subheader(f"Conversación {i+1} - {chat['timestamp']}") + for message in chat['messages']: + if message['role'] == 'user': + st.write("Usuario: " + message['content']) + else: + st.write("Asistente: " + message['content']) + st.write("---") + else: + st.write("No se encontraron conversaciones con el ChatBot.") + + # Añadir logs para depuración + if st.checkbox("Mostrar datos de depuración"): + st.write("Datos del estudiante (para depuración):") + st.json(student_data) + +#####################--- Funciones de manejo de archivos --- ############################################################################# + +def handle_file_upload(username, lang_code, nlp_models, t, analysis_type): + get_text = get_text if callable(get_text) else lambda key, section, default: t.get(key, default) + st.subheader(get_text('file_upload_section', analysis_type.upper(), 'File Upload')) + + uploaded_file = st.file_uploader( + get_text('file_uploader', analysis_type.upper(), 'Upload a file'), + type=['txt', 'pdf', 'docx', 'doc', 'odt'] + ) + + if uploaded_file is not None: + file_contents = read_file_contents(uploaded_file) + + if store_file_contents(username, uploaded_file.name, file_contents, analysis_type): + st.success(get_text('file_upload_success', analysis_type.upper(), 'File uploaded successfully')) + return file_contents, uploaded_file.name + else: + st.error(get_text('file_upload_error', analysis_type.upper(), 'Error uploading file')) + + return None, None + +def read_file_contents(uploaded_file): + # Implementar la lógica para leer diferentes tipos de archivos + # Por ahora, asumimos que es un archivo de texto + return uploaded_file.getvalue().decode('utf-8') + +######################--- Funciones generales de análisis ---######################################################## +def display_analysis_results(analysis, lang_code, t): + if analysis is None: + st.warning(t.get('no_analysis', "No hay análisis disponible.")) + return + + if not isinstance(analysis, dict): + st.error(f"Error: El resultado del análisis no es un diccionario. 
Tipo actual: {type(analysis)}")
+        return
+
+    if 'type' not in analysis:
+        st.error("Error: El resultado del análisis no contiene la clave 'type'")
+        st.write("Claves presentes en el resultado:", list(analysis.keys()))
+        return
+
+    if analysis['type'] == 'morphosyntactic':
+        st.subheader(t.get('morphosyntactic_title', "Análisis Morfosintáctico"))
+        display_morphosyntax_results(analysis['result'], lang_code, t)
+    elif analysis['type'] == 'semantic':
+        st.subheader(t.get('semantic_title', "Análisis Semántico"))
+        display_semantic_results(analysis['result'], lang_code, t)
+    elif analysis['type'] == 'discourse':
+        st.subheader(t.get('discourse_title', "Análisis del Discurso"))
+        display_discourse_results(analysis['result'], lang_code, t)
+    else:
+        st.warning(t.get('no_analysis', "No hay análisis disponible."))
+
+    # Mostrar el contenido completo del análisis para depuración
+    st.write("Contenido completo del análisis:", analysis)
+
+def handle_user_input(user_input, lang_code, nlp_models, analysis_type, t, file_contents=None):
+    # 't' (traducciones) se recibe como parámetro; la respuesta se devuelve al llamador
+    response = process_chat_input(user_input, lang_code, nlp_models, analysis_type, file_contents, t)
+    return response
+
+
+###################################--- Funciones específicas de análisis morfosintáctico ---################################################################
+
+def display_morphosyntax_analysis_interface(user_input, nlp_models, lang_code, t):
+    # 't' puede incluir una función 'get_text'; en caso contrario, usar el diccionario como respaldo
+    get_text = t.get('get_text') if callable(t.get('get_text')) else (lambda key, section, default: t.get(key, default))
+    logging.info(f"Displaying morphosyntax analysis interface. Language code: {lang_code}")
+
+    # Inicializar el historial del chat si no existe
+    if 'morphosyntax_chat_history' not in st.session_state:
+        initial_message = get_text('initial_message', 'MORPHOSYNTACTIC',
+                                   "Este es un chatbot para análisis morfosintáctico. 
Para generar un diagrama de arco, " + "use el comando /analisis_morfosintactico seguido del texto entre corchetes.") + st.session_state.morphosyntax_chat_history = [{"role": "assistant", "content": initial_message}] + + # Contenedor para el chat + chat_container = st.container() + + # Mostrar el historial del chat + with chat_container: + for message in st.session_state.morphosyntax_chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualization" in message: + st.components.v1.html(message["visualization"], height=450, scrolling=True) + + # Input del usuario + user_input = st.chat_input(get_text('chat_placeholder', 'MORPHOSYNTACTIC', + "Ingrese su mensaje o use /analisis_morfosintactico [texto] para analizar")) + + if user_input: + # Añadir el mensaje del usuario al historial + st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input}) + + # Procesar el input del usuario + if user_input.startswith('/analisis_morfosintactico'): + text_to_analyze = user_input.split('[', 1)[1].rsplit(']', 1)[0] + try: + result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code]) + + # Guardar el resultado en el estado de la sesión + st.session_state.current_analysis = { + 'type': 'morphosyntactic', + 'result': result + } + + # Añadir el resultado al historial del chat + response = get_text('analysis_completed', 'MORPHOSYNTACTIC', 'Análisis morfosintáctico completado.') + st.session_state.morphosyntax_chat_history.append({ + "role": "assistant", + "content": response, + "visualization": result['arc_diagram'][0] if result['arc_diagram'] else None + }) + + # Guardar resultados en la base de datos + if store_morphosyntax_result( + st.session_state.username, + text_to_analyze, + get_repeated_words_colors(nlp_models[lang_code](text_to_analyze)), + result['arc_diagram'], + result['pos_analysis'], + result['morphological_analysis'], + result['sentence_structure'] + ): + st.success(get_text('success_message', 'MORPHOSYNTACTIC', 'Análisis guardado correctamente.')) + else: + st.error(get_text('error_message', 'MORPHOSYNTACTIC', 'Hubo un problema al guardar el análisis.')) + + except Exception as e: + error_message = get_text('analysis_error', 'MORPHOSYNTACTIC', f'Ocurrió un error durante el análisis: {str(e)}') + st.session_state.morphosyntax_chat_history.append({"role": "assistant", "content": error_message}) + logging.error(f"Error in morphosyntactic analysis: {str(e)}") + else: + # Aquí puedes procesar otros tipos de inputs del usuario si es necesario + response = get_text('command_not_recognized', 'MORPHOSYNTACTIC', + "Comando no reconocido. Use /analisis_morfosintactico [texto] para realizar un análisis.") + st.session_state.morphosyntax_chat_history.append({"role": "assistant", "content": response}) + + # Forzar la actualización de la interfaz + st.rerun() + + logging.info("Morphosyntax analysis interface displayed successfully") + + +################################################################################################# +def display_morphosyntax_results(result, lang_code, t): + if result is None: + st.warning(t['no_results']) # Añade esta traducción a tu diccionario + return + + # doc = result['doc'] + # advanced_analysis = result['advanced_analysis'] + advanced_analysis = result + + # Mostrar leyenda (código existente) + st.markdown(f"##### {t['legend']}") + legend_html = "
" + for pos, color in POS_COLORS.items(): + if pos in POS_TRANSLATIONS[lang_code]: + legend_html += f"
{POS_TRANSLATIONS[lang_code][pos]}
" + legend_html += "
" + st.markdown(legend_html, unsafe_allow_html=True) + + # Mostrar análisis de palabras repetidas (código existente) + if 'repeated_words' in advanced_analysis: + with st.expander(t['repeated_words'], expanded=True): + st.markdown(advanced_analysis['repeated_words'], unsafe_allow_html=True) + + # Mostrar estructura de oraciones + if 'sentence_structure' in advanced_analysis: + with st.expander(t['sentence_structure'], expanded=True): + for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']): + sentence_str = ( + f"**{t['sentence']} {i+1}** " + f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- " + f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- " + f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- " + f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}" + ) + st.markdown(sentence_str) + else: + st.warning("No se encontró información sobre la estructura de las oraciones.") + + + # Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico + col1, col2 = st.columns(2) + + with col1: + with st.expander(t['pos_analysis'], expanded=True): + pos_df = pd.DataFrame(advanced_analysis['pos_analysis']) + + # Traducir las etiquetas POS a sus nombres en el idioma seleccionado + pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Renombrar las columnas para mayor claridad + pos_df = pos_df.rename(columns={ + 'pos': t['grammatical_category'], + 'count': t['count'], + 'percentage': t['percentage'], + 'examples': t['examples'] + }) + + # Mostrar el dataframe + st.dataframe(pos_df) + + with col2: + with st.expander(t['morphological_analysis'], expanded=True): + morph_df = pd.DataFrame(advanced_analysis['morphological_analysis']) + + # Definir el mapeo de columnas + column_mapping = { + 'text': t['word'], + 'lemma': t['lemma'], + 'pos': t['grammatical_category'], + 'dep': t['dependency'], + 'morph': t['morphology'] + } + + # Renombrar las columnas existentes + morph_df = morph_df.rename(columns={col: new_name for col, new_name in column_mapping.items() if col in morph_df.columns}) + + # Traducir las categorías gramaticales + morph_df[t['grammatical_category']] = morph_df[t['grammatical_category']].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Traducir las dependencias + dep_translations = { + 'es': { + 'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto', + 'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto', + 'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado', + 'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso', + 'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal', + 'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva', + 'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador', + 'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo', + 'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis', + 'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación' + }, + 'en': { + 'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object', + 'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement', + 'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 
'adverbial clause modifier', + 'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker', + 'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun', + 'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking', + 'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression', + 'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan', + 'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation' + }, + 'fr': { + 'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect', + 'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique', + 'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial', + 'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal', + 'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant', + 'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée', + 'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin', + 'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation' + } + } + morph_df[t['dependency']] = morph_df[t['dependency']].map(lambda x: dep_translations[lang_code].get(x, x)) + + # Traducir la morfología + def translate_morph(morph_string, lang_code): + morph_translations = { + 'es': { + 'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido', + 'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo', + 'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz', + 'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural', + 'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo', + 'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado', + 'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto' + }, + 'en': { + 'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person', + 'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice', + 'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative', + 'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle', + 'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect' + }, + 'fr': { + 'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom', + 'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix', + 'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif', + 'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe', + 'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait' + } + } + for key, value in morph_translations[lang_code].items(): + morph_string = morph_string.replace(key, value) + return 
morph_string + + morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code)) + + # Seleccionar y ordenar las columnas a mostrar + columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']] + columns_to_display = [col for col in columns_to_display if col in morph_df.columns] + + # Mostrar el DataFrame + st.dataframe(morph_df[columns_to_display]) + + # Mostrar diagramas de arco (código existente) + #with st.expander(t['arc_diagram'], expanded=True): + # sentences = list(doc.sents) + # arc_diagrams = [] + # for i, sent in enumerate(sentences): + # st.subheader(f"{t['sentence']} {i+1}") + # html = displacy.render(sent, style="dep", options={"distance": 100}) + # html = html.replace('height="375"', 'height="200"') + # html = re.sub(r']*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) + # html = re.sub(r']*transform="translate\((\d+),(\d+)\)"', lambda m: f'{concepts_str}", unsafe_allow_html=True) + + # Mostrar gráfico + if 'graph' in entry: + try: + img_bytes = base64.b64decode(entry['graph']) + st.image(img_bytes, caption="Gráfico de relaciones conceptuales") + except Exception as e: + st.error(f"No se pudo mostrar el gráfico: {str(e)}") + +########################################################## + with st.expander("Histórico de Análisis Discursivos"): + discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for entry in discourse_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave para ambos documentos + if 'key_concepts1' in entry: + concepts_str1 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts1']]) + st.write("Conceptos clave del documento 1:") + #st.write(concepts_str1) + st.markdown(f"
<div>{concepts_str1}</div>
", unsafe_allow_html=True) + + if 'key_concepts2' in entry: + concepts_str2 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts2']]) + st.write("Conceptos clave del documento 2:") + #st.write(concepts_str2) + st.markdown(f"
<div>{concepts_str2}</div>
", unsafe_allow_html=True) + + try: + if 'combined_graph' in entry and entry['combined_graph']: + img_bytes = base64.b64decode(entry['combined_graph']) + st.image(img_bytes) + elif 'graph1' in entry and 'graph2' in entry: + col1, col2 = st.columns(2) + with col1: + if entry['graph1']: + img_bytes1 = base64.b64decode(entry['graph1']) + st.image(img_bytes1) + with col2: + if entry['graph2']: + img_bytes2 = base64.b64decode(entry['graph2']) + st.image(img_bytes2) + else: + st.write("No se encontraron gráficos para este análisis.") + except Exception as e: + st.error(f"No se pudieron mostrar los gráficos: {str(e)}") + st.write("Datos de los gráficos (para depuración):") + if 'graph1' in entry: + st.write("Graph 1:", entry['graph1'][:100] + "...") + if 'graph2' in entry: + st.write("Graph 2:", entry['graph2'][:100] + "...") + if 'combined_graph' in entry: + st.write("Combined Graph:", entry['combined_graph'][:100] + "...") + +########################################################## + with st.expander("Histórico de Conversaciones con el ChatBot"): + if 'chat_history' in student_data: + for i, chat in enumerate(student_data['chat_history']): + st.subheader(f"Conversación {i+1} - {chat['timestamp']}") + for message in chat['messages']: + if message['role'] == 'user': + st.write("Usuario: " + message['content']) + else: + st.write("Asistente: " + message['content']) + st.write("---") + else: + st.write("No se encontraron conversaciones con el ChatBot.") + + # Añadir logs para depuración + if st.checkbox("Mostrar datos de depuración"): + st.write("Datos del estudiante (para depuración):") + st.json(student_data) + +#####################--- Funciones de manejo de archivos --- ############################################################################# + +def handle_file_upload(username, lang_code, nlp_models, t, analysis_type): + st.subheader(t['get_text']('file_upload_section', analysis_type.upper(), 'File Upload')) + + uploaded_file = st.file_uploader( + t['get_text']('file_uploader', analysis_type.upper(), 'Upload a file'), + type=['txt', 'pdf', 'docx', 'doc', 'odt'] + ) + + if uploaded_file is not None: + file_contents = read_file_contents(uploaded_file) + + if store_file_contents(username, uploaded_file.name, file_contents, analysis_type): + st.success(t['get_text']('file_upload_success', analysis_type.upper(), 'File uploaded successfully')) + return file_contents, uploaded_file.name + else: + st.error(t['get_text']('file_upload_error', analysis_type.upper(), 'Error uploading file')) + + return None, None + +def read_file_contents(uploaded_file): + # Implementar la lógica para leer diferentes tipos de archivos + # Por ahora, asumimos que es un archivo de texto + return uploaded_file.getvalue().decode('utf-8') + +######################--- Funciones generales de análisis ---######################################################## +def display_analysis_results(analysis, lang_code, t): + if analysis is None: + st.warning(t.get('no_analysis', "No hay análisis disponible.")) + return + + if not isinstance(analysis, dict): + st.error(f"Error: El resultado del análisis no es un diccionario. 
Tipo actual: {type(analysis)}") + return + + if 'type' not in analysis: + st.error("Error: El resultado del análisis no contiene la clave 'type'") + st.write("Claves presentes en el resultado:", list(analysis.keys())) + return + + if analysis['type'] == 'morphosyntactic': + st.subheader(t.get('morphosyntactic_title', "Análisis Morfosintáctico")) + display_morphosyntax_results(analysis['result'], lang_code, t) + elif analysis['type'] == 'semantic': + st.subheader(t.get('semantic_title', "Análisis Semántico")) + display_semantic_results(analysis['result'], lang_code, t) + elif analysis['type'] == 'discourse': + st.subheader(t.get('discourse_title', "Análisis del Discurso")) + display_discourse_results(analysis['result'], lang_code, t) + else: + st.warning(t.get('no_analysis', "No hay análisis disponible.")) + + # Mostrar el contenido completo del análisis para depuración + st.write("Contenido completo del análisis:", analysis) + +def handle_user_input(user_input, lang_code, nlp_models, analysis_type, file_contents=None): + response = process_chat_input(user_input, lang_code, nlp_models, analysis_type, file_contents, t) + # Procesa la respuesta y actualiza la interfaz de usuario + + +###################################--- Funciones específicas de análisis morfosintáctico ---################################################################ + +def display_morphosyntax_analysis_interface(user_input, nlp_models, lang_code, t): + logging.info(f"Displaying morphosyntax analysis interface. Language code: {lang_code}") + + # Inicializar el historial del chat si no existe + if 'morphosyntax_chat_history' not in st.session_state: + initial_message = t['get_text']('initial_message', 'MORPHOSYNTACTIC', + "Este es un chatbot para análisis morfosintáctico. Para generar un diagrama de arco, " + "use el comando /analisis_morfosintactico seguido del texto entre corchetes.") + st.session_state.morphosyntax_chat_history = [{"role": "assistant", "content": initial_message}] + + # Contenedor para el chat + chat_container = st.container() + + # Mostrar el historial del chat + with chat_container: + for message in st.session_state.morphosyntax_chat_history: + with st.chat_message(message["role"]): + st.write(message["content"]) + if "visualization" in message: + st.components.v1.html(message["visualization"], height=450, scrolling=True) + + # Input del usuario + user_input = st.chat_input(t['get_text']('chat_placeholder', 'MORPHOSYNTACTIC', + "Ingrese su mensaje o use /analisis_morfosintactico [texto] para analizar")) + + if user_input: + # Añadir el mensaje del usuario al historial + st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input}) + + # Procesar el input del usuario + if user_input.startswith('/analisis_morfosintactico'): + text_to_analyze = user_input.split('[', 1)[1].rsplit(']', 1)[0] + try: + result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code]) + + # Guardar el resultado en el estado de la sesión + st.session_state.current_analysis = { + 'type': 'morphosyntactic', + 'result': result + } + + # Añadir el resultado al historial del chat + response = t['get_text']('analysis_completed', 'MORPHOSYNTACTIC', 'Análisis morfosintáctico completado.') + st.session_state.morphosyntax_chat_history.append({ + "role": "assistant", + "content": response, + "visualization": result['arc_diagram'][0] if result['arc_diagram'] else None + }) + + # Guardar resultados en la base de datos + if store_morphosyntax_result( + st.session_state.username, + 
text_to_analyze,
+                        get_repeated_words_colors(nlp_models[lang_code](text_to_analyze)),
+                        result['arc_diagram'],
+                        result['pos_analysis'],
+                        result['morphological_analysis'],
+                        result['sentence_structure']
+                    ):
+                        st.success(t['get_text']('success_message', 'MORPHOSYNTACTIC', 'Análisis guardado correctamente.'))
+                    else:
+                        st.error(t['get_text']('error_message', 'MORPHOSYNTACTIC', 'Hubo un problema al guardar el análisis.'))
+
+            except Exception as e:
+                error_message = t['get_text']('analysis_error', 'MORPHOSYNTACTIC', f'Ocurrió un error durante el análisis: {str(e)}')
+                st.session_state.morphosyntax_chat_history.append({"role": "assistant", "content": error_message})
+                logging.error(f"Error in morphosyntactic analysis: {str(e)}")
+        else:
+            # Aquí puedes procesar otros tipos de inputs del usuario si es necesario
+            response = t['get_text']('command_not_recognized', 'MORPHOSYNTACTIC',
+                                     "Comando no reconocido. Use /analisis_morfosintactico [texto] para realizar un análisis.")
+            st.session_state.morphosyntax_chat_history.append({"role": "assistant", "content": response})
+
+        # Forzar la actualización de la interfaz
+        st.rerun()
+
+    logging.info("Morphosyntax analysis interface displayed successfully")
+
+
+#################################################################################################
+def display_morphosyntax_results(result, lang_code, t):
+    if result is None:
+        st.warning(t['no_results'])  # Añade esta traducción a tu diccionario
+        return
+
+    # doc = result['doc']
+    # advanced_analysis = result['advanced_analysis']
+    advanced_analysis = result
+
+    # Mostrar leyenda (código existente)
+    st.markdown(f"##### {t['legend']}")
+    legend_html = "<div style='display: flex; flex-wrap: wrap;'>"
+    for pos, color in POS_COLORS.items():
+        if pos in POS_TRANSLATIONS[lang_code]:
+            legend_html += f"<span style='background-color: {color}; padding: 2px 6px; margin: 2px; border-radius: 4px;'>{POS_TRANSLATIONS[lang_code][pos]}</span>"
+    legend_html += "</div>
" + st.markdown(legend_html, unsafe_allow_html=True) + + # Mostrar análisis de palabras repetidas (código existente) + if 'repeated_words' in advanced_analysis: + with st.expander(t['repeated_words'], expanded=True): + st.markdown(advanced_analysis['repeated_words'], unsafe_allow_html=True) + + # Mostrar estructura de oraciones + if 'sentence_structure' in advanced_analysis: + with st.expander(t['sentence_structure'], expanded=True): + for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']): + sentence_str = ( + f"**{t['sentence']} {i+1}** " + f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- " + f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- " + f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- " + f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}" + ) + st.markdown(sentence_str) + else: + st.warning("No se encontró información sobre la estructura de las oraciones.") + + + # Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico + col1, col2 = st.columns(2) + + with col1: + with st.expander(t['pos_analysis'], expanded=True): + pos_df = pd.DataFrame(advanced_analysis['pos_analysis']) + + # Traducir las etiquetas POS a sus nombres en el idioma seleccionado + pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Renombrar las columnas para mayor claridad + pos_df = pos_df.rename(columns={ + 'pos': t['grammatical_category'], + 'count': t['count'], + 'percentage': t['percentage'], + 'examples': t['examples'] + }) + + # Mostrar el dataframe + st.dataframe(pos_df) + + with col2: + with st.expander(t['morphological_analysis'], expanded=True): + morph_df = pd.DataFrame(advanced_analysis['morphological_analysis']) + + # Definir el mapeo de columnas + column_mapping = { + 'text': t['word'], + 'lemma': t['lemma'], + 'pos': t['grammatical_category'], + 'dep': t['dependency'], + 'morph': t['morphology'] + } + + # Renombrar las columnas existentes + morph_df = morph_df.rename(columns={col: new_name for col, new_name in column_mapping.items() if col in morph_df.columns}) + + # Traducir las categorías gramaticales + morph_df[t['grammatical_category']] = morph_df[t['grammatical_category']].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Traducir las dependencias + dep_translations = { + 'es': { + 'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto', + 'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto', + 'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado', + 'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso', + 'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal', + 'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva', + 'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador', + 'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo', + 'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis', + 'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación' + }, + 'en': { + 'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object', + 'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement', + 'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 
'adverbial clause modifier', + 'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker', + 'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun', + 'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking', + 'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression', + 'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan', + 'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation' + }, + 'fr': { + 'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect', + 'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique', + 'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial', + 'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal', + 'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant', + 'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée', + 'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin', + 'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation' + } + } + morph_df[t['dependency']] = morph_df[t['dependency']].map(lambda x: dep_translations[lang_code].get(x, x)) + + # Traducir la morfología + def translate_morph(morph_string, lang_code): + morph_translations = { + 'es': { + 'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido', + 'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo', + 'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz', + 'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural', + 'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo', + 'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado', + 'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto' + }, + 'en': { + 'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person', + 'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice', + 'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative', + 'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle', + 'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect' + }, + 'fr': { + 'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom', + 'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix', + 'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif', + 'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe', + 'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait' + } + } + for key, value in morph_translations[lang_code].items(): + morph_string = morph_string.replace(key, value) + return 
morph_string + + morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code)) + + # Seleccionar y ordenar las columnas a mostrar + columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']] + columns_to_display = [col for col in columns_to_display if col in morph_df.columns] + + # Mostrar el DataFrame + st.dataframe(morph_df[columns_to_display]) + + # Mostrar diagramas de arco (código existente) + #with st.expander(t['arc_diagram'], expanded=True): + # sentences = list(doc.sents) + # arc_diagrams = [] + # for i, sent in enumerate(sentences): + # st.subheader(f"{t['sentence']} {i+1}") + # html = displacy.render(sent, style="dep", options={"distance": 100}) + # html = html.replace('height="375"', 'height="200"') + # html = re.sub(r']*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) + # html = re.sub(r']*transform="translate\((\d+),(\d+)\)"', lambda m: f'{concepts_str}", unsafe_allow_html=True) + + # Mostrar gráfico + if 'graph' in entry: + try: + img_bytes = base64.b64decode(entry['graph']) + st.image(img_bytes, caption="Gráfico de relaciones conceptuales") + except Exception as e: + st.error(f"No se pudo mostrar el gráfico: {str(e)}") + +########################################################## + with st.expander("Histórico de Análisis Discursivos"): + discourse_entries = [entry for entry in student_data['entries'] if entry['analysis_type'] == 'discourse'] + for entry in discourse_entries: + st.subheader(f"Análisis del {entry['timestamp']}") + + # Mostrar conceptos clave para ambos documentos + if 'key_concepts1' in entry: + concepts_str1 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts1']]) + st.write("Conceptos clave del documento 1:") + #st.write(concepts_str1) + st.markdown(f"
<div>{concepts_str1}</div>
", unsafe_allow_html=True) + + if 'key_concepts2' in entry: + concepts_str2 = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in entry['key_concepts2']]) + st.write("Conceptos clave del documento 2:") + #st.write(concepts_str2) + st.markdown(f"
<div>{concepts_str2}</div>
", unsafe_allow_html=True) + + try: + if 'combined_graph' in entry and entry['combined_graph']: + img_bytes = base64.b64decode(entry['combined_graph']) + st.image(img_bytes) + elif 'graph1' in entry and 'graph2' in entry: + col1, col2 = st.columns(2) + with col1: + if entry['graph1']: + img_bytes1 = base64.b64decode(entry['graph1']) + st.image(img_bytes1) + with col2: + if entry['graph2']: + img_bytes2 = base64.b64decode(entry['graph2']) + st.image(img_bytes2) + else: + st.write("No se encontraron gráficos para este análisis.") + except Exception as e: + st.error(f"No se pudieron mostrar los gráficos: {str(e)}") + st.write("Datos de los gráficos (para depuración):") + if 'graph1' in entry: + st.write("Graph 1:", entry['graph1'][:100] + "...") + if 'graph2' in entry: + st.write("Graph 2:", entry['graph2'][:100] + "...") + if 'combined_graph' in entry: + st.write("Combined Graph:", entry['combined_graph'][:100] + "...") + +########################################################## + with st.expander("Histórico de Conversaciones con el ChatBot"): + if 'chat_history' in student_data: + for i, chat in enumerate(student_data['chat_history']): + st.subheader(f"Conversación {i+1} - {chat['timestamp']}") + for message in chat['messages']: + if message['role'] == 'user': + st.write("Usuario: " + message['content']) + else: + st.write("Asistente: " + message['content']) + st.write("---") + else: + st.write("No se encontraron conversaciones con el ChatBot.") + + # Añadir logs para depuración + if st.checkbox("Mostrar datos de depuración"): + st.write("Datos del estudiante (para depuración):") + st.json(student_data) + +################################################################################################## +def display_morphosyntax_analysis_interface(nlp_models, lang_code): + translations = { + 'es': { + 'title': "AIdeaText - Análisis morfológico y sintáctico", + 'input_label': "Ingrese un texto para analizar (máximo 5,000 palabras", + 'input_placeholder': "Esta funcionalidad le ayudará con dos competencias:\n" + "[1] \"Escribe diversos tipos de textos en su lengua materna\"\n" + "[2] \"Lee diversos tipos de textos escritos en su lengua materna\"\n\n" + "Ingrese su texto aquí para analizar...", + 'analyze_button': "Analizar texto", + 'repeated_words': "Palabras repetidas", + 'legend': "Leyenda: Categorías gramaticales", + 'arc_diagram': "Análisis sintáctico: Diagrama de arco", + 'sentence': "Oración", + 'success_message': "Análisis guardado correctamente.", + 'error_message': "Hubo un problema al guardar el análisis. Por favor, inténtelo de nuevo.", + 'warning_message': "Por favor, ingrese un texto para analizar.", + 'initial_message': "Ingrese un texto y presione 'Analizar texto' para comenzar.", + 'no_results': "No hay resultados disponibles. 
Por favor, realice un análisis primero.", + 'pos_analysis': "Análisis de categorías gramaticales", + 'morphological_analysis': "Análisis morfológico", + 'sentence_structure': "Estructura de oraciones", + 'word': "Palabra", + 'count': "Cantidad", + 'percentage': "Porcentaje", + 'examples': "Ejemplos", + 'lemma': "Lema", + 'tag': "Etiqueta", + 'dep': "Dependencia", + 'morph': "Morfología", + 'root': "Raíz", + 'subjects': "Sujetos", + 'objects': "Objetos", + 'verbs': "Verbos", + 'grammatical_category': "Categoría gramatical", + 'dependency': "Dependencia", + 'morphology': "Morfología" + }, + 'en': { + 'title': "AIdeaText - Morphological and Syntactic Analysis", + 'input_label': "Enter a text to analyze (max 5,000 words):", + 'input_placeholder': "This functionality will help you with two competencies:\n" + "[1] \"Write various types of texts in your native language\"\n" + "[2] \"Read various types of written texts in your native language\"\n\n" + "Enter your text here to analyze...", + 'analyze_button': "Analyze text", + 'repeated_words': "Repeated words", + 'legend': "Legend: Grammatical categories", + 'arc_diagram': "Syntactic analysis: Arc diagram", + 'sentence': "Sentence", + 'success_message': "Analysis saved successfully.", + 'error_message': "There was a problem saving the analysis. Please try again.", + 'warning_message': "Please enter a text to analyze.", + 'initial_message': "Enter a text and press 'Analyze text' to start.", + 'no_results': "No results available. Please perform an analysis first.", + 'pos_analysis': "Part of Speech Analysis", + 'morphological_analysis': "Morphological Analysis", + 'sentence_structure': "Sentence Structure", + 'word': "Word", + 'count': "Count", + 'percentage': "Percentage", + 'examples': "Examples", + 'lemma': "Lemma", + 'tag': "Tag", + 'dep': "Dependency", + 'morph': "Morphology", + 'root': "Root", + 'subjects': "Subjects", + 'objects': "Objects", + 'verbs': "Verbs", + 'grammatical_category': "Grammatical category", + 'dependency': "Dependency", + 'morphology': "Morphology" + }, + 'fr': { + 'title': "AIdeaText - Analyse morphologique et syntaxique", + 'input_label': "Entrez un texte à analyser (max 5 000 mots) :", + 'input_placeholder': "Cette fonctionnalité vous aidera avec deux compétences :\n" + "[1] \"Écrire divers types de textes dans votre langue maternelle\"\n" + "[2] \"Lire divers types de textes écrits dans votre langue maternelle\"\n\n" + "Entrez votre texte ici pour l'analyser...", + 'analyze_button': "Analyser le texte", + 'repeated_words': "Mots répétés", + 'legend': "Légende : Catégories grammaticales", + 'arc_diagram': "Analyse syntaxique : Diagramme en arc", + 'sentence': "Phrase", + 'success_message': "Analyse enregistrée avec succès.", + 'error_message': "Un problème est survenu lors de l'enregistrement de l'analyse. Veuillez réessayer.", + 'warning_message': "Veuillez entrer un texte à analyser.", + 'initial_message': "Entrez un texte et appuyez sur 'Analyser le texte' pour commencer.", + 'no_results': "Aucun résultat disponible. 
Veuillez d'abord effectuer une analyse.", + 'pos_analysis': "Analyse des parties du discours", + 'morphological_analysis': "Analyse morphologique", + 'sentence_structure': "Structure des phrases", + 'word': "Mot", + 'count': "Nombre", + 'percentage': "Pourcentage", + 'examples': "Exemples", + 'lemma': "Lemme", + 'tag': "Étiquette", + 'dep': "Dépendance", + 'morph': "Morphologie", + 'root': "Racine", + 'subjects': "Sujets", + 'objects': "Objets", + 'verbs': "Verbes", + 'grammatical_category': "Catégorie grammaticale", + 'dependency': "Dépendance", + 'morphology': "Morphologie" + } + } + + t = translations[lang_code] + + input_key = f"morphosyntax_input_{lang_code}" + + if input_key not in st.session_state: + st.session_state[input_key] = "" + + sentence_input = st.text_area( + t['input_label'], + height=150, + placeholder=t['input_placeholder'], + value=st.session_state[input_key], + key=f"text_area_{lang_code}", + on_change=lambda: setattr(st.session_state, input_key, st.session_state[f"text_area_{lang_code}"]) + ) + + if st.button(t['analyze_button'], key=f"analyze_button_{lang_code}"): + current_input = st.session_state[input_key] + if current_input: + doc = nlp_models[lang_code](current_input) + + # Análisis morfosintáctico avanzado + advanced_analysis = perform_advanced_morphosyntactic_analysis(current_input, nlp_models[lang_code]) + + # Guardar el resultado en el estado de la sesión + st.session_state.morphosyntax_result = { + 'doc': doc, + 'advanced_analysis': advanced_analysis + } + + # Mostrar resultados + display_morphosyntax_results(st.session_state.morphosyntax_result, lang_code, t) + + # Guardar resultados + if store_morphosyntax_result( + st.session_state.username, + current_input, + get_repeated_words_colors(doc), + advanced_analysis['arc_diagram'], + advanced_analysis['pos_analysis'], + advanced_analysis['morphological_analysis'], + advanced_analysis['sentence_structure'] + ): + st.success(t['success_message']) + else: + st.error(t['error_message']) + else: + st.warning(t['warning_message']) + elif 'morphosyntax_result' in st.session_state and st.session_state.morphosyntax_result is not None: + + # Si hay un resultado guardado, mostrarlo + display_morphosyntax_results(st.session_state.morphosyntax_result, lang_code, t) + else: + st.info(t['initial_message']) # Añade esta traducción a tu diccionario + +################################################################################################# +################################################################################################# +def display_morphosyntax_results(result, lang_code, t): + if result is None: + st.warning(t['no_results']) # Añade esta traducción a tu diccionario + return + + # doc = result['doc'] + # advanced_analysis = result['advanced_analysis'] + advanced_analysis = result + + # Mostrar leyenda (código existente) + st.markdown(f"##### {t['legend']}") + legend_html = "
" + for pos, color in POS_COLORS.items(): + if pos in POS_TRANSLATIONS[lang_code]: + legend_html += f"
{POS_TRANSLATIONS[lang_code][pos]}
" + legend_html += "
" + st.markdown(legend_html, unsafe_allow_html=True) + + # Mostrar análisis de palabras repetidas (código existente) + if 'repeated_words' in advanced_analysis: + with st.expander(t['repeated_words'], expanded=True): + st.markdown(advanced_analysis['repeated_words'], unsafe_allow_html=True) + + # Mostrar estructura de oraciones + if 'sentence_structure' in advanced_analysis: + with st.expander(t['sentence_structure'], expanded=True): + for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']): + sentence_str = ( + f"**{t['sentence']} {i+1}** " + f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- " + f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- " + f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- " + f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}" + ) + st.markdown(sentence_str) + else: + st.warning("No se encontró información sobre la estructura de las oraciones.") + + + # Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico + col1, col2 = st.columns(2) + + with col1: + with st.expander(t['pos_analysis'], expanded=True): + pos_df = pd.DataFrame(advanced_analysis['pos_analysis']) + + # Traducir las etiquetas POS a sus nombres en el idioma seleccionado + pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Renombrar las columnas para mayor claridad + pos_df = pos_df.rename(columns={ + 'pos': t['grammatical_category'], + 'count': t['count'], + 'percentage': t['percentage'], + 'examples': t['examples'] + }) + + # Mostrar el dataframe + st.dataframe(pos_df) + + with col2: + with st.expander(t['morphological_analysis'], expanded=True): + morph_df = pd.DataFrame(advanced_analysis['morphological_analysis']) + + # Definir el mapeo de columnas + column_mapping = { + 'text': t['word'], + 'lemma': t['lemma'], + 'pos': t['grammatical_category'], + 'dep': t['dependency'], + 'morph': t['morphology'] + } + + # Renombrar las columnas existentes + morph_df = morph_df.rename(columns={col: new_name for col, new_name in column_mapping.items() if col in morph_df.columns}) + + # Traducir las categorías gramaticales + morph_df[t['grammatical_category']] = morph_df[t['grammatical_category']].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Traducir las dependencias + dep_translations = { + 'es': { + 'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto', + 'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto', + 'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado', + 'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso', + 'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal', + 'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva', + 'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador', + 'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo', + 'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis', + 'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación' + }, + 'en': { + 'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object', + 'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement', + 'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 
'adverbial clause modifier', + 'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker', + 'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun', + 'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking', + 'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression', + 'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan', + 'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation' + }, + 'fr': { + 'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect', + 'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique', + 'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial', + 'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal', + 'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant', + 'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée', + 'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin', + 'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation' + } + } + morph_df[t['dependency']] = morph_df[t['dependency']].map(lambda x: dep_translations[lang_code].get(x, x)) + + # Traducir la morfología + def translate_morph(morph_string, lang_code): + morph_translations = { + 'es': { + 'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido', + 'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo', + 'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz', + 'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural', + 'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo', + 'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado', + 'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto' + }, + 'en': { + 'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person', + 'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice', + 'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative', + 'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle', + 'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect' + }, + 'fr': { + 'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom', + 'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix', + 'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif', + 'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe', + 'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait' + } + } + for key, value in morph_translations[lang_code].items(): + morph_string = morph_string.replace(key, value) + return 
morph_string + + morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code)) + + # Seleccionar y ordenar las columnas a mostrar + columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']] + columns_to_display = [col for col in columns_to_display if col in morph_df.columns] + + # Mostrar el DataFrame + st.dataframe(morph_df[columns_to_display]) + + # Mostrar diagramas de arco (código existente) + #with st.expander(t['arc_diagram'], expanded=True): + # sentences = list(doc.sents) + # arc_diagrams = [] + # for i, sent in enumerate(sentences): + # st.subheader(f"{t['sentence']} {i+1}") + # html = displacy.render(sent, style="dep", options={"distance": 100}) + # html = html.replace('height="375"', 'height="200"') + # html = re.sub(r']*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) + # html = re.sub(r']*transform="translate\((\d+),(\d+)\)"', lambda m: f'{POS_TRANSLATIONS[lang_code][pos]}" + legend_html += "" + st.markdown(legend_html, unsafe_allow_html=True) + + # Mostrar análisis de palabras repetidas (código existente) + word_colors = get_repeated_words_colors(doc) + with st.expander(t['repeated_words'], expanded=True): + highlighted_text = highlight_repeated_words(doc, word_colors) + st.markdown(highlighted_text, unsafe_allow_html=True) + + # Mostrar estructura de oraciones + with st.expander(t['sentence_structure'], expanded=True): + for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']): + sentence_str = ( + f"**{t['sentence']} {i+1}** " + f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- " + f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- " + f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- " + f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}" + ) + st.markdown(sentence_str) + + # Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico + col1, col2 = st.columns(2) + + with col1: + with st.expander(t['pos_analysis'], expanded=True): + pos_df = pd.DataFrame(advanced_analysis['pos_analysis']) + + # Traducir las etiquetas POS a sus nombres en el idioma seleccionado + pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Renombrar las columnas para mayor claridad + pos_df = pos_df.rename(columns={ + 'pos': t['grammatical_category'], + 'count': t['count'], + 'percentage': t['percentage'], + 'examples': t['examples'] + }) + + # Mostrar el dataframe + st.dataframe(pos_df) + + with col2: + with st.expander(t['morphological_analysis'], expanded=True): + morph_df = pd.DataFrame(advanced_analysis['morphological_analysis']) + + # Definir el mapeo de columnas + column_mapping = { + 'text': t['word'], + 'lemma': t['lemma'], + 'pos': t['grammatical_category'], + 'dep': t['dependency'], + 'morph': t['morphology'] + } + + # Renombrar las columnas existentes + morph_df = morph_df.rename(columns={col: new_name for col, new_name in column_mapping.items() if col in morph_df.columns}) + + # Traducir las categorías gramaticales + morph_df[t['grammatical_category']] = morph_df[t['grammatical_category']].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x)) + + # Traducir las dependencias + dep_translations = { + 'es': { + 'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto', + 'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto', + 'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 
'dislocated': 'dislocado', + 'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso', + 'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal', + 'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva', + 'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador', + 'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo', + 'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis', + 'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación' + }, + 'en': { + 'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object', + 'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement', + 'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 'adverbial clause modifier', + 'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker', + 'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun', + 'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking', + 'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression', + 'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan', + 'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation' + }, + 'fr': { + 'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect', + 'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique', + 'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial', + 'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal', + 'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant', + 'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée', + 'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin', + 'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation' + } + } + morph_df[t['dependency']] = morph_df[t['dependency']].map(lambda x: dep_translations[lang_code].get(x, x)) + + # Traducir la morfología + def translate_morph(morph_string, lang_code): + morph_translations = { + 'es': { + 'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido', + 'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo', + 'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz', + 'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural', + 'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo', + 'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado', + 'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto' + }, + 'en': { + 'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person', + 'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice', + 'Fem': 
'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative', + 'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle', + 'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect' + }, + 'fr': { + 'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom', + 'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix', + 'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif', + 'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe', + 'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait' + } + } + for key, value in morph_translations[lang_code].items(): + morph_string = morph_string.replace(key, value) + return morph_string + + morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code)) + + # Seleccionar y ordenar las columnas a mostrar + columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']] + columns_to_display = [col for col in columns_to_display if col in morph_df.columns] + + # Mostrar el DataFrame + st.dataframe(morph_df[columns_to_display]) + + # Mostrar diagramas de arco (código existente) + with st.expander(t['arc_diagram'], expanded=True): + sentences = list(doc.sents) + arc_diagrams = [] + for i, sent in enumerate(sentences): + st.subheader(f"{t['sentence']} {i+1}") + html = displacy.render(sent, style="dep", options={"distance": 100}) + html = html.replace('height="375"', 'height="200"') + html = re.sub(r']*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html) + html = re.sub(r']*transform="translate\((\d+),(\d+)\)"', lambda m: f'window.scrollTo(0,document.body.scrollHeight);', unsafe_allow_html=True) + +###################################################### +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/utils/__init__.py b/modules/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/utils/__pycache__/__init__.cpython-311.pyc b/modules/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b51589b9cec07021967714049af5e2b3eb0779c Binary files /dev/null and b/modules/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/spacy_utils.cpython-311.pyc b/modules/utils/__pycache__/spacy_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9acf7db1ebe2e977bd8d44a8b8442e5f4add28f Binary files /dev/null and b/modules/utils/__pycache__/spacy_utils.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/svg_to_png_converter.cpython-311.pyc b/modules/utils/__pycache__/svg_to_png_converter.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6c81622cb038ba7575d796656a4a1da92a2b2cc Binary files /dev/null and b/modules/utils/__pycache__/svg_to_png_converter.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/widget_utils.cpython-311.pyc b/modules/utils/__pycache__/widget_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f86da5dc8aa5d728adc48c4cbb90af05ed1cc12 Binary files /dev/null and 
diff --git a/modules/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modules/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b51589b9cec07021967714049af5e2b3eb0779c Binary files /dev/null and b/modules/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/spacy_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9acf7db1ebe2e977bd8d44a8b8442e5f4add28f Binary files /dev/null and b/modules/utils/__pycache__/spacy_utils.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/svg_to_png_converter.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6c81622cb038ba7575d796656a4a1da92a2b2cc Binary files /dev/null and b/modules/utils/__pycache__/svg_to_png_converter.cpython-311.pyc differ diff --git a/modules/utils/__pycache__/widget_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f86da5dc8aa5d728adc48c4cbb90af05ed1cc12 Binary files /dev/null and b/modules/utils/__pycache__/widget_utils.cpython-311.pyc differ diff --git a/modules/utils/export_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9ea8feefe24680e643b77a7fd3de1f2afc4028 --- /dev/null +++ b/modules/utils/export_utils.py @@ -0,0 +1,70 @@ +import streamlit as st +from reportlab.lib.pagesizes import letter +from reportlab.pdfgen import canvas +from docx import Document +import io + +def export_data(user_data, t, format='pdf'): + if format == 'pdf': + return export_to_pdf(user_data, t) + elif format == 'docx': + return export_to_docx(user_data, t) + else: + raise ValueError(f"Unsupported format: {format}") + +def export_to_pdf(user_data, t): + buffer = io.BytesIO() + c = canvas.Canvas(buffer, pagesize=letter) + width, height = letter + + # Título + c.setFont("Helvetica-Bold", 16) + c.drawString(50, height - 50, t['analysis_report']) + + # Resumen + c.setFont("Helvetica", 12) + c.drawString(50, height - 80, f"{t['morpho_analyses']}: {len(user_data['morphosyntax_analyses'])}") + c.drawString(50, height - 100, f"{t['semantic_analyses']}: {len(user_data['semantic_analyses'])}") + c.drawString(50, height - 120, f"{t['discourse_analyses']}: {len(user_data['discourse_analyses'])}") + + # Aquí agregarías más detalles de los análisis... + + c.save() + buffer.seek(0) + return buffer + +def export_to_docx(user_data, t): + doc = Document() + doc.add_heading(t['analysis_report'], 0) + + doc.add_paragraph(f"{t['morpho_analyses']}: {len(user_data['morphosyntax_analyses'])}") + doc.add_paragraph(f"{t['semantic_analyses']}: {len(user_data['semantic_analyses'])}") + doc.add_paragraph(f"{t['discourse_analyses']}: {len(user_data['discourse_analyses'])}") + + # Aquí agregarías más detalles de los análisis... + + buffer = io.BytesIO() + doc.save(buffer) + buffer.seek(0) + return buffer + +def display_export_options(t): + format = st.radio(t['select_export_format'], ['PDF', 'DOCX']) + if st.button(t['export']): + user_data = st.session_state.user_data + if format == 'PDF': + buffer = export_data(user_data, t, format='pdf') + st.download_button( + label=t['download_pdf'], + data=buffer, + file_name="analysis_report.pdf", + mime="application/pdf" + ) + elif format == 'DOCX': + buffer = export_data(user_data, t, format='docx') + st.download_button( + label=t['download_docx'], + data=buffer, + file_name="analysis_report.docx", + mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document" + ) \ No newline at end of file
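For reference, export_data can be exercised outside Streamlit; a minimal sketch, assuming the import path below and stub dictionaries that carry only the keys the module actually reads:

# Minimal sketch: produce a PDF report from stub data (keys mirror export_to_pdf above).
from modules.utils.export_utils import export_data

user_data = {
    'morphosyntax_analyses': [],   # only the lengths are reported, so empty stubs suffice
    'semantic_analyses': [],
    'discourse_analyses': [],
}
t = {
    'analysis_report': 'Analysis Report',
    'morpho_analyses': 'Morphosyntactic analyses',
    'semantic_analyses': 'Semantic analyses',
    'discourse_analyses': 'Discourse analyses',
}
buffer = export_data(user_data, t, format='pdf')   # returns io.BytesIO, already rewound
with open('analysis_report.pdf', 'wb') as f:
    f.write(buffer.read())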
diff --git a/modules/utils/spacy_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a52ebea7e62eadbbc48c53ed3a6f20021cfa9d --- /dev/null +++ b/modules/utils/spacy_utils.py @@ -0,0 +1,9 @@ +# modules/spacy_utils.py +import spacy + +def load_spacy_models(): + return { + 'es': spacy.load("es_core_news_lg"), + 'en': spacy.load("en_core_web_lg"), + 'fr': spacy.load("fr_core_news_lg") + } \ No newline at end of file diff --git a/modules/utils/svg_to_png_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..14faf269b1299501090f8ea04563982d30347af7 --- /dev/null +++ b/modules/utils/svg_to_png_converter.py @@ -0,0 +1,51 @@ +import io +from svglib.svglib import svg2rlg +from reportlab.graphics import renderPM +from pymongo import MongoClient +import base64 + +# Asume que tienes una función para obtener la conexión a MongoDB +from ..database.mongo_db import get_mongodb + +def convert_svg_to_png(svg_string): + """Convierte una cadena SVG a una imagen PNG.""" + drawing = svg2rlg(io.BytesIO(svg_string.encode('utf-8'))) + png_bio = io.BytesIO() + renderPM.drawToFile(drawing, png_bio, fmt="PNG") + return png_bio.getvalue() + +def save_png_to_database(username, analysis_id, png_data): + """Guarda la imagen PNG en la base de datos.""" + client = get_mongodb() + db = client['aideatext_db'] # Asegúrate de usar el nombre correcto de tu base de datos + collection = db['png_diagrams'] + + png_base64 = base64.b64encode(png_data).decode('utf-8') + + document = { + 'username': username, + 'analysis_id': analysis_id, + 'png_data': png_base64 + } + + result = collection.insert_one(document) + return result.inserted_id + +def process_and_save_svg_diagrams(username, analysis_id, svg_diagrams): + """Procesa una lista de diagramas SVG, los convierte a PNG y los guarda en la base de datos.""" + png_ids = [] + for svg in svg_diagrams: + png_data = convert_svg_to_png(svg) + png_id = save_png_to_database(username, analysis_id, png_data) + png_ids.append(png_id) + return png_ids + +# Función para recuperar PNGs de la base de datos +def get_png_diagrams(username, analysis_id): + """Recupera los diagramas PNG de la base de datos para un análisis específico.""" + client = get_mongodb() + db = client['aideatext_db'] + collection = db['png_diagrams'] + + diagrams = collection.find({'username': username, 'analysis_id': analysis_id}) + return [base64.b64decode(doc['png_data']) for doc in diagrams] \ No newline at end of file diff --git a/modules/utils/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
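The SVG-to-PNG pipeline above is meant to be fed the markup displacy produces; a minimal sketch, assuming a reachable MongoDB behind get_mongodb and hypothetical user/analysis identifiers:

# Minimal sketch: render a dependency diagram and persist it as PNG in MongoDB.
import spacy
from spacy import displacy
from modules.utils.svg_to_png_converter import process_and_save_svg_diagrams

nlp = spacy.load("es_core_news_lg")
doc = nlp("El gato duerme en el sofá.")
svg = displacy.render(doc, style="dep")   # SVG markup as a string
png_ids = process_and_save_svg_diagrams("demo_user", "analysis_001", [svg])
print(png_ids)   # inserted_id of each stored PNG document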
diff --git a/modules/utils/widget_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bc4d09e817ebfc82da66c36c4c64dd6d76038b97 --- /dev/null +++ b/modules/utils/widget_utils.py @@ -0,0 +1,6 @@ +import streamlit as st + +def generate_unique_key(module_name, element_type="input", username=None): + # Si el nombre de usuario no se pasa explícitamente, lo toma de session_state + username = username or st.session_state.username + return f"{module_name}_{element_type}_{username}" \ No newline at end of file diff --git a/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7f480640d65cb5e283082cb4cb79c04b16fbf5eb --- /dev/null +++ b/requirements.txt @@ -0,0 +1,49 @@ +anthropic +azure-identity +azure-cosmos +antiword +bcrypt +certifi +cairosvg +python-dotenv +drawSvg +docx2txt +#https://huggingface.co/spacy/es_core_news_lg/resolve/main/es_core_news_lg-any-py3-none-any.whl +#https://huggingface.co/spacy/en_core_web_lg/resolve/main/en_core_web_lg-any-py3-none-any.whl +#https://huggingface.co/spacy/fr_core_news_lg/resolve/main/fr_core_news_lg-any-py3-none-any.whl +es-core-news-lg @ https://github.com/explosion/spacy-models/releases/download/es_core_news_lg-3.5.0/es_core_news_lg-3.5.0-py3-none-any.whl +en-core-web-lg @ https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-3.5.0/en_core_web_lg-3.5.0-py3-none-any.whl +fr-core-news-lg @ https://github.com/explosion/spacy-models/releases/download/fr_core_news_lg-3.5.0/fr_core_news_lg-3.5.0-py3-none-any.whl +numpy +networkx +matplotlib +odfpy +plotly +pydantic +python-dateutil +pandas +python-docx +pywin32; sys_platform == "win32" +pymssql +pymongo +PyPDF2 +rlPyCairo +requests +reportlab +spacy==3.5.4 +seaborn +squarify +streamlit==1.38.0 +streamlit-float +streamlit-player +streamlit-chat +streamlit-antd-components +streamlit-option-menu +scipy +sentencepiece +scikit-learn +svglib +transformers +torch +tqdm +thinc \ No newline at end of file diff --git a/run_app.py new file mode 100644 index 0000000000000000000000000000000000000000..6189f8dd3ed3a310ea26102ab83d264a72e54a90 --- /dev/null +++ b/run_app.py @@ -0,0 +1,7 @@ +import subprocess +import sys +from setup_logging import setup_logging + +if __name__ == "__main__": + setup_logging() + subprocess.run([sys.executable, "-m", "streamlit", "run", "app.py", "--server.runOnSave=true"]) \ No newline at end of file diff --git a/translations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..517b1a75adacf334d015e71ba42fe57630097200 --- /dev/null +++ b/translations/__init__.py @@ -0,0 +1,36 @@ +# translations/__init__.py +import logging +from importlib import import_module + +logger = logging.getLogger(__name__) + +def get_translations(lang_code): + # Asegurarse de que lang_code sea válido + if lang_code not in ['es', 'en', 'fr']: + logger.warning(f"Invalid lang_code: {lang_code}. Defaulting to 'es'") + lang_code = 'es' + + try: + # Importar dinámicamente el módulo de traducción + translation_module = import_module(f'.{lang_code}', package='translations') + translations = getattr(translation_module, 'TRANSLATIONS', {}) + except ImportError: + logger.warning(f"Translation module for {lang_code} not found. 
Falling back to English.") + # Importar el módulo de inglés como fallback + translation_module = import_module('.en', package='translations') + translations = getattr(translation_module, 'TRANSLATIONS', {}) + + def get_text(key, section='COMMON', default=''): + return translations.get(section, {}).get(key, default) + + return { + **translations.get('COMMON', {}), + **translations.get('TABS', {}), + **translations.get('MORPHOSYNTACTIC', {}), + **translations.get('SEMANTIC', {}), + **translations.get('DISCOURSE', {}), + **translations.get('ACTIVITIES', {}), + **translations.get('FEEDBACK', {}), + # 'get_text' va al final para que la clave homónima de COMMON no sombree la función + 'get_text': get_text + } \ No newline at end of file diff --git a/translations/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a3cb0fbded72c1fd91b4fb643f57b18ff072536 Binary files /dev/null and b/translations/__pycache__/__init__.cpython-311.pyc differ diff --git a/translations/__pycache__/en.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb946b6c97b7cde61d91326601c845d99f33597e Binary files /dev/null and b/translations/__pycache__/en.cpython-311.pyc differ diff --git a/translations/__pycache__/es.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3b0a733ca2568cd90b0510740556861917a674d Binary files /dev/null and b/translations/__pycache__/es.cpython-311.pyc differ diff --git a/translations/__pycache__/fr.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f66470dadd4d8ddb1428de937a915471d5f942 Binary files /dev/null and b/translations/__pycache__/fr.cpython-311.pyc differ diff --git a/translations/en.py new file mode 100644 index 0000000000000000000000000000000000000000..9d6ac5f87cf5ec309fe462b6d15fa929b766ab01 --- /dev/null +++ b/translations/en.py @@ -0,0 +1,279 @@ +# translations/en.py + +COMMON = { + # A + 'activities_message':"Activities messages", + 'activities_placeholder':"Activities placeholder", + 'analysis_placeholder':"Analysis placeholder", + 'analyze_button' : "Analyze", + 'analysis_types_chart' : "Analysis types chart", + 'analysis_from': "Analysis carried out on", + # C + 'chat_title': "Analysis Chat", + 'export_button': "Export Current Analysis", + 'export_success': "Analysis and chat exported successfully.", + 'export_error': "There was a problem exporting the analysis and chat.", + 'get_text': "Get text.", + 'hello': "Hello", + # L + 'logout': "Log out.", + 'loading_data': "Loading data", + 'load_selected_file': 'Load selected file', + # N + 'no_analysis': "No analysis available. 
Use the chat to perform an analysis.", + 'nothing_to_export': "No analysis or chat to export.", + 'results_title': "Analysis Results", + 'select_language': "Select language", + 'student_activities':"Student activities", + # T + 'total_analyses': "Total analyses", + # W + 'welcome': "Welcome to AIdeaText" + +} + +TABS = { + 'morpho_tab': "Morphosyntactic Analysis", + 'semantic_tab': "Semantic Analysis", + 'discourse_tab': "Discourse Analysis", + 'activities_tab': "My Activities", + 'feedback_tab': "Feedback Form" +} + +MORPHOSYNTACTIC = { + #A + 'analyze_button': "Analyze text", + 'arc_diagram': "Syntactic analysis: Arc diagram", + #C + 'count': "Count", + #D + 'dependency': "Dependency", + 'dep': "Dependency", + #E + 'error_message': "There was a problem saving the analysis. Please try again.", + 'examples': "Examples", + #G + 'grammatical_category': "Grammatical category", + #L + 'lemma': "Lemma", + 'legend': "Legend: Grammatical categories", + #O + 'objects': "Objects", + #P + 'pos_analysis': "Part of Speech Analysis", + 'percentage': "Percentage", + #N + 'no_results': "No results available. Please perform an analysis first.", + #M + 'morpho_title': "AIdeaText - Morphological analysis", + 'morpho_initial_message': "This is a general-purpose chatbot, but it has a specific function for visual text analysis: generating arc diagrams. To produce them, enter the command /analisis_morfosintactico followed by the text you want to analyze in square brackets.", + 'morpho_input_label': "Enter a text to analyze (max 30 words):", + 'morphosyntactic_analysis_completed': 'Morphosyntactic analysis completed. Please review the results in the following section.', + 'morphological_analysis': "Morphological Analysis", + 'morphology': "Morphology", + 'morph': "Morphology", + #R + 'root': "Root", + 'repeated_words': "Repeated words", + #S + 'sentence': "Sentence", + 'success_message': "Analysis saved successfully.", + 'sentence_structure': "Sentence Structure", + 'subjects': "Subjects", + #V + 'verbs': "Verbs", + #T + 'title': "AIdeaText - Morphological and Syntactic Analysis", + 'tag': "Tag", + #W + 'warning_message': "Please enter a text to analyze.", + 'word': "Word", + 'processing': 'Processing...', + 'error_processing': 'Error processing', + 'morphosyntactic_analysis_error': 'Error in morphosyntactic analysis' +} + +SEMANTIC = { + # A + 'analysis_completed': "Analysis completed", + 'analysis_section': "Semantic Analysis", + 'analyze_document': 'Analyze document', + 'analysis_saved_success': 'Analysis saved successfully', + 'analysis_save_error': 'Error saving the analysis', + 'analyze_button': "Analyze text", + 'analyzing_doc': "Analyzing document", + # C + 'chat_title': "Semantic Analysis Chat", + 'chat_placeholder': "Ask a question or use a command (/summary, /entities, /sentiment, /topics, /concept_graph, /entity_graph, /topic_graph)", + 'clear_chat': "Clear chat", + 'conceptual_relations': "Conceptual Relations", + # D + 'delete_file': "Delete file", + # E + 'error_message': "There was a problem saving the semantic analysis. 
Please try again.", + # F + 'file_uploader': "Or upload a text file", + 'file_upload_success': "File uploaded and saved successfully", + 'file_upload_error': 'Error uploading file', + 'file_section': "Files", + 'file_loaded_success': "File loaded successfully", + 'file_load_error': "Error loading file", + 'file_upload_error': "Error uploading and saving file", + 'file_deleted_success': 'File deleted successfully', + 'file_delete_error': 'Error deleting file', + # G + 'graph_title': "Semantic Analysis Visualization", + # I + 'identified_entities': "Identified Entities", + # K + 'key_concepts': "Key Concepts", + # N + 'no_analysis': "No analysis available. Please upload or select a file.", + 'no_results': "No results available. Please perform an analysis first.", + 'no_file': "Please upload a file to start the analysis.", + 'no_file_selected': "Please select an archive to start the analysis.", + # S + 'semantic_title': "Semantic Analysis", + 'semantic_initial_message': "This is a general-purpose chatbot, but it has a specific function for visual text analysis: it generates a graph with the main entities of the text. To produce it, enter a text file in txt, pdf, doc, docx or odt format and press the 'analyze file' button. After generating the graph, you can interact with the chat based on the document.", + 'send_button': "Send", + 'select_saved_file': "Select saved file", + 'success_message': "Semantic analysis saved successfully.", + # T + 'text_input_label': "Enter a text to analyze (max. 5,000 words):", + 'text_input_placeholder': "The purpose of this application is to improve your writing skills...", + 'title': "AIdeaText - Semantic Analysis", + # U + 'upload_file': "Upload file", + # W + 'warning_message': "Please enter a text or upload a file to analyze." +} + +DISCOURSE = { + 'discourse_title': "AIdeaText - Discourse Analysis", + 'file_uploader1': "Upload text file 1 (Pattern)", + 'file_uploader2': "Upload text file 2 (Comparison)", + 'discourse_initial_message': "This is a general purpose chatbot, but it has a specific function for visual text analysis: it generates two graphs with the main entities of each file to make a comparison between both texts. To produce it, enter one file first and then another in txt, pdf, doc, docx or odt format and press the 'analyze file' button. After the graph is generated, you can interact with the chat based on the document.", + 'analyze_button': "Analyze texts", + 'comparison': "Comparison of Semantic Relations", + 'success_message': "Discourse analysis saved successfully.", + 'error_message': "There was a problem saving the discourse analysis. Please try again.", + 'warning_message': "Please upload both files to analyze.", + 'no_results': "No results available. 
Please perform an analysis first.", + 'key_concepts': "Key Concepts", + 'graph_not_available': "The graph is not available.", + 'concepts_not_available': "Key concepts are not available.", + 'comparison_not_available': "The comparison is not available.", + 'morphosyntax_history': "Morphosyntax history", + 'analysis_of': "Analysis of" + +} + +ACTIVITIES = { + 'analysis_types_chart_title': "Types of analyses performed", + 'analysis_types_chart_x': "Analysis type", + 'analysis_types_chart_y': "Count", + 'analysis_from': "Analysis from", + 'assistant': "Assistant", + 'activities_summary': "Activities and Progress Summary", + 'chat_history_expander': "Chat History", + 'chat_from': "Chat from", + 'combined_graph': "Combined Graph", + 'conceptual_relations_graph': "Conceptual Relations Graph", + 'conversation': "Conversation", + 'discourse_analyses_expander': "Discourse Analyses History", + 'discourse_analyses': "Discourse Analyses", + 'discourse_history': "Discourse Analysis History", + 'document': "Document", + 'data_load_error': "Error loading student data", + 'graph_display_error': "Could not display the graph", + 'graph_doc1': "Graph document 1", + 'graph_doc2': "Graph document 2", + 'key_concepts': "Key concepts", + 'loading_data': "Loading student data...", + 'morphological_analysis': "Morphological Analysis", + 'morphosyntax_analyses_expander': "Morphosyntactic Analyses History", + 'morphosyntax_history': "Morphosyntactic Analysis History", + 'no_arc_diagram': "No arc diagram found for this analysis.", + 'no_chat_history': "No conversations with the ChatBot were found.", + 'no_data_warning': "No analysis data found for this student.", + 'progress_of': "Progress of", + 'semantic_analyses': "Semantic Analyses", + 'semantic_analyses_expander': "Semantic Analyses History", + 'semantic_history': "Semantic Analysis History", + 'show_debug_data': "Show debug data", + 'student_debug_data': "Student data (for debugging):", + 'summary_title': "Activities Summary", + 'title': "Activities", + 'timestamp': "Timestamp", + 'total_analyses': "Total analyses performed:", + 'try_analysis': "Try performing some text analyses first.", + 'user': "User" +} + +FEEDBACK = { + 'email': "Email", + 'feedback': "Feedback", + 'feedback_title': "Feedback form", + 'feedback_error': "There was a problem submitting the form. Please try again.", + 'feedback_success': "Thanks for your feedback", + 'complete_all_fields': "Please complete all fields", + 'name': "Name", + 'submit': "Submit" +} + + +TEXT_TYPES = { + 'descriptive': [ + 'What are you describing?', + 'What are its main characteristics?', + 'How does it look, sound, smell, or feel?', + 'What makes it unique or special?' + ], + 'narrative': [ + 'Who is the protagonist?', + 'Where and when does the story take place?', + 'What event starts the action?', + 'What happens next?', + 'How does the story end?' + ], + 'expository': [ + 'What is the main topic?', + 'What important aspects do you want to explain?', + 'Can you provide examples or data to support your explanation?', + 'How does this topic relate to other concepts?' + ], + 'argumentative': [ + 'What is your main thesis or argument?', + 'What are your supporting arguments?', + 'What evidence do you have to back up your arguments?', + 'What are the counterarguments and how do you refute them?', + 'What is your conclusion?' 
+ ], + 'instructive': [ + 'What task or process are you explaining?', + 'What materials or tools are needed?', + 'What are the steps to follow?', + 'Are there any important precautions or tips to mention?' + ], + 'pitch': [ + 'What?', + 'What for?', + 'For whom?', + 'How?' + ] + } + +# Configuration of the language model for English +NLP_MODEL = 'en_core_web_lg' + +# Esta línea es crucial: +TRANSLATIONS = { + 'COMMON': COMMON, + 'TABS': TABS, + 'MORPHOSYNTACTIC': MORPHOSYNTACTIC, + 'SEMANTIC': SEMANTIC, + 'DISCOURSE': DISCOURSE, + 'ACTIVITIES': ACTIVITIES, + 'FEEDBACK': FEEDBACK, + 'TEXT_TYPES': TEXT_TYPES, + 'NLP_MODEL': NLP_MODEL +} \ No newline at end of file
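Since later sections override earlier ones when get_translations flattens TRANSLATIONS, lookups behave as below; a minimal sketch, assuming the translations package above is importable:

# Minimal sketch: flattened lookups plus the per-section get_text helper.
from translations import get_translations

t = get_translations('en')
print(t['welcome'])        # "Welcome to AIdeaText" (from COMMON)
print(t['morpho_tab'])     # "Morphosyntactic Analysis" (from TABS)
print(t['get_text']('send_button', 'SEMANTIC'))             # "Send"
print(t['get_text']('missing_key', 'COMMON', 'fallback'))   # "fallback"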
diff --git a/translations/es.py new file mode 100644 index 0000000000000000000000000000000000000000..44753fc4cfa7a08ef14463a1518c2e8ca245bc81 --- /dev/null +++ b/translations/es.py @@ -0,0 +1,257 @@ +# translations/es.py + +COMMON = { + 'analyze_button' : "Analizar", + 'activities_message':"Mensajes de las actividades", + 'activities_placeholder':"Espacio de las actividades", + 'analysis_placeholder':"Marcador de posición del análisis", + 'analysis_types_chart' : "Gráfico para el tipo de análisis", + 'analysis_from': "Análisis realizado el ", + 'welcome': "Bienvenido a AIdeaText", + 'select_language': "Selecciona un idioma", + 'hello': "Hola", + 'chat_title': "Chat de Análisis", + 'results_title': "Resultados del Análisis", + 'export_button': "Exportar Análisis Actual", + 'no_analysis': "No hay análisis disponible. Utiliza el chat para realizar un análisis.", + 'export_success': "Análisis y chat exportados correctamente.", + 'export_error': "Hubo un problema al exportar el análisis y el chat.", + 'nothing_to_export': "No hay análisis o chat para exportar.", + # L + 'loading_data': "Cargando datos", + 'load_selected_file': 'Cargar archivo seleccionado', + 'logout': "Cerrar sesión.", + # S + 'student_activities':"Actividades del estudiante", + 'get_text': "Obtener texto.", + #T + 'total_analyses': "Análisis totales" +} + +TABS = { + 'morpho_tab': "Análisis Morfosintáctico", + 'semantic_tab': "Análisis Semántico", + 'discourse_tab': "Análisis del Discurso", + 'activities_tab': "Mis Actividades", + 'feedback_tab': "Formulario de Opinión" +} + +MORPHOSYNTACTIC = { + 'morpho_title': "AIdeaText - Análisis morfológico y sintáctico", + 'morpho_initial_message': "Este es un chatbot de propósito general, pero tiene una función específica para el análisis visual de textos: generación de diagramas de arco. Para producirlos, ingrese el comando /analisis_morfosintactico seguido del texto que quiere analizar entre corchetes.", + 'morpho_input_label': "Ingrese un texto para analizar (máximo 30 palabras)", + 'morphosyntactic_analysis_completed': "Análisis morfosintáctico completado. Por favor, revisa los resultados en la siguiente sección.", + 'analyze_button': "Analizar texto", + 'repeated_words': "Palabras repetidas", + 'legend': "Leyenda: Categorías gramaticales", + 'arc_diagram': "Análisis sintáctico: Diagrama de arco", + 'sentence': "Oración", + 'success_message': "Análisis guardado correctamente.", + 'error_message': "Hubo un problema al guardar el análisis. Por favor, inténtelo de nuevo.", + 'warning_message': "Por favor, ingrese un texto para analizar.", + 'no_results': "No hay resultados disponibles. Por favor, realice un análisis primero.", + 'pos_analysis': "Análisis de categorías gramaticales", + 'morphological_analysis': "Análisis morfológico", + 'sentence_structure': "Estructura de oraciones", + 'word': "Palabra", + 'count': "Cantidad", + 'percentage': "Porcentaje", + 'examples': "Ejemplos", + 'lemma': "Lema", + 'tag': "Etiqueta", + 'dep': "Dependencia", + 'morph': "Morfología", + 'root': "Raíz", + 'subjects': "Sujetos", + 'objects': "Objetos", + 'verbs': "Verbos", + 'grammatical_category': "Categoría gramatical", + 'dependency': "Dependencia", + 'morphology': "Morfología", + 'processing': 'Procesando...', + 'error_processing': 'Error al procesar', + 'morphosyntactic_analysis_error': 'Error en el análisis morfosintáctico' +} + +SEMANTIC = { + # A + 'analysis_completed': "Análisis terminado", + 'analysis_section': "Análisis Semántico", + 'analyze_document': 'Analizar documento', + 'analysis_saved_success': 'Análisis guardado con éxito', + 'analysis_save_error': 'Error al guardar el análisis', + 'analyze_button': "Analizar texto", + 'analyzing_doc': "Analizando documento", + # C + 'chat_title': "Chat de Análisis Semántico", + 'chat_placeholder': "Haz una pregunta o usa un comando (/resumen, /entidades, /sentimiento, /temas, /grafo_conceptos, /grafo_entidades, /grafo_temas)", + 'clear_chat': "Limpiar chat", + 'conceptual_relations': "Relaciones Conceptuales", + # D + 'delete_file': "Borrar archivo", + # E + 'error_message': "Hubo un problema al guardar el análisis semántico. Por favor, inténtelo de nuevo.", + # F + 'file_uploader': "O cargue un archivo de texto", + 'file_upload_success': "Archivo subido y guardado exitosamente", + 'file_upload_error': "Error al subir y guardar el archivo", + 'file_section': "Archivos", + 'file_loaded_success': "Archivo cargado exitosamente", + 'file_load_error': "Error al cargar el archivo", + 'file_deleted_success': 'Archivo borrado con éxito', + 'file_delete_error': 'Error al borrar el archivo', + # G + 'graph_title': "Visualización de Análisis Semántico", + # I + 'identified_entities': "Entidades Identificadas", + # K + 'key_concepts': "Conceptos Clave", + # L + 'load_selected_file': 'Cargar archivo seleccionado', + # N + 'no_analysis': "No hay análisis disponible. Por favor, cargue o seleccione un archivo.", + 'no_results': "No hay resultados disponibles. Por favor, realice un análisis primero.", + 'no_file': "Por favor, cargue un archivo para comenzar el análisis.", + 'no_file_selected': "Por favor, seleccione un archivo para comenzar el análisis.", + # S + 'semantic_initial_message': "Este es un chatbot de propósito general, pero tiene una función específica para el análisis visual de textos: genera un grafo con las principales entidades del texto. Para producirlo, ingrese un archivo de texto en formato txt, pdf, doc, docx o odt y pulse el botón 'analizar archivo'. Después de la generación del grafo puede interactuar con el chat en función del documento.", + 'semantic_title': "Análisis Semántico", + 'send_button': "Enviar", + 'select_saved_file': "Seleccionar archivo guardado", + 'success_message': "Análisis semántico guardado correctamente.", + # T + 'text_input_label': "Ingrese un texto para analizar (máx. 
5,000 palabras):", + 'text_input_placeholder': "El objetivo de esta aplicación es que mejore sus habilidades de redacción...", + 'title': "AIdeaText - Análisis semántico", + # U + 'upload_file': "Agregar un archivo", + # W + 'warning_message': "Por favor, ingrese un texto o cargue un archivo para analizar." +} + +DISCOURSE = { + 'discourse_title': "AIdeaText - Análisis del discurso", + 'discourse_initial_message': "Este es un chatbot de propósito general, pero tiene una función específica para el análisis visual de textos: genera dos grafos con las principales entidades de cada archivo para hacer una comparación entre ambos textos. Para producirlo, ingrese un archivo primero y otro después en formato txt, pdf, doc, docx o odt y pulse el botón 'analizar archivo'. Después de la generación del grafo puede interactuar con el chat en función del documento.", + 'file_uploader1': "Cargar archivo de texto 1 (Patrón)", + 'file_uploader2': "Cargar archivo de texto 2 (Comparación)", + 'analyze_button': "Analizar textos", + 'comparison': "Comparación de Relaciones Semánticas", + 'success_message': "Análisis del discurso guardado correctamente.", + 'error_message': "Hubo un problema al guardar el análisis del discurso. Por favor, inténtelo de nuevo.", + 'warning_message': "Por favor, cargue ambos archivos para analizar.", + 'no_results': "No hay resultados disponibles. Por favor, realice un análisis primero.", + 'key_concepts': "Conceptos Clave", + 'graph_not_available': "El gráfico no está disponible.", + 'concepts_not_available': "Los conceptos clave no están disponibles.", + 'comparison_not_available': "La comparación no está disponible." +} + +ACTIVITIES = { + 'analysis_types_chart_title': "Tipos de análisis realizados", + 'analysis_types_chart_x': "Tipo de análisis", + 'analysis_types_chart_y': "Cantidad", + 'analysis_from': "Análisis del", + 'assistant': "Asistente", + 'activities_summary': "Resumen de Actividades y Progreso", + 'chat_history_expander': "Historial de Chat", + 'chat_from': "Chat del", + 'combined_graph': "Gráfico combinado", + 'conceptual_relations_graph': "Gráfico de relaciones conceptuales", + 'conversation': "Conversación", + 'discourse_analyses_expander': "Historial de Análisis del Discurso", + 'discourse_analyses': "Análisis del Discurso", + 'discourse_history': "Histórico de Análisis del Discurso", + 'document': "Documento", + 'data_load_error': "Error al cargar los datos del estudiante", + 'graph_display_error': "No se pudo mostrar el gráfico", + 'graph_doc1': "Gráfico documento 1", + 'graph_doc2': "Gráfico documento 2", + 'key_concepts': "Conceptos clave", + 'loading_data': "Cargando datos del estudiante...", + 'morphological_analysis': "Análisis Morfológico", + 'morphosyntax_analyses_expander': "Historial de Análisis Morfosintácticos", + 'morphosyntax_history': "Histórico de Análisis Morfosintácticos", + 'no_arc_diagram': "No se encontró diagrama de arco para este análisis.", + 'no_chat_history': "No se encontraron conversaciones con el ChatBot.", + 'no_data_warning': "No se encontraron datos de análisis para este estudiante.", + 'progress_of': "Progreso de", + 'semantic_analyses': "Análisis Semánticos", + 'semantic_analyses_expander': "Historial de Análisis Semánticos", + 'semantic_history': "Histórico de Análisis Semánticos", + 'show_debug_data': "Mostrar datos de depuración", + 'student_debug_data': "Datos del estudiante (para depuración):", + 'summary_title': "Resumen de Actividades", + 'title': "Actividades", + 'timestamp': "Fecha y hora", + 'total_analyses': "Total 
de análisis realizados:", + 'try_analysis': "Intenta realizar algunos análisis de texto primero.", + 'user': "Usuario" +} + +FEEDBACK = { + 'email': "Correo electrónico", + 'feedback': "Retroalimentación", + 'feedback_title': "Formulario de opinión", + 'feedback_error': "Hubo un problema al enviar el formulario. Por favor, intenta de nuevo.", + 'feedback_success': "Gracias por tu respuesta", + 'complete_all_fields': "Por favor, completa todos los campos", + 'name': "Nombre", + 'submit': "Enviar" +} + +TEXT_TYPES = { + 'descriptivo': [ + '¿Qué estás describiendo?', + '¿Cuáles son sus características principales?', + '¿Cómo se ve, suena, huele o se siente?', + '¿Qué lo hace único o especial?' + ], + 'narrativo': [ + '¿Quién es el protagonista?', + '¿Dónde y cuándo ocurre la historia?', + '¿Qué evento inicia la acción?', + '¿Qué sucede después?', + '¿Cómo termina la historia?' + ], + 'expositivo': [ + '¿Cuál es el tema principal?', + '¿Qué aspectos importantes quieres explicar?', + '¿Puedes dar ejemplos o datos que apoyen tu explicación?', + '¿Cómo se relaciona este tema con otros conceptos?' + ], + 'argumentativo': [ + '¿Cuál es tu tesis o argumento principal?', + '¿Cuáles son tus argumentos de apoyo?', + '¿Qué evidencias tienes para respaldar tus argumentos?', + '¿Cuáles son los contraargumentos y cómo los refutas?', + '¿Cuál es tu conclusión?' + ], + 'instructivo': [ + '¿Qué tarea o proceso estás explicando?', + '¿Qué materiales o herramientas se necesitan?', + '¿Cuáles son los pasos a seguir?', + '¿Hay precauciones o consejos importantes que mencionar?' + ], + 'pitch': [ + '¿Qué?', + '¿Para qué?', + '¿Para quién?', + '¿Cómo?' + ] + } + +# Configuración del modelo de lenguaje para español +NLP_MODEL = 'es_core_news_lg' + +# Esta línea es crucial: +TRANSLATIONS = { + 'COMMON': COMMON, + 'TABS': TABS, + 'MORPHOSYNTACTIC': MORPHOSYNTACTIC, + 'SEMANTIC': SEMANTIC, + 'DISCOURSE': DISCOURSE, + 'ACTIVITIES': ACTIVITIES, + 'FEEDBACK': FEEDBACK, + 'TEXT_TYPES': TEXT_TYPES, + 'NLP_MODEL': NLP_MODEL +} \ No newline at end of file diff --git a/translations/fr.py b/translations/fr.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b967d0963f34a4bda186b809eef2fc800eeef2 --- /dev/null +++ b/translations/fr.py @@ -0,0 +1,261 @@ +# translations/fr.py + +COMMON = { + 'activities_message':"Messages d'activités", + 'activities_placeholder':"Espace réservé aux activités", + 'analyze_button' : "Analyser", + 'analysis_placeholder':"Espace réservé à l'analyse", + 'analysis_types_chart' : "Graphique pour le type d'analyse", + 'analysis_from': "Analyse réalisée sur", + 'welcome': "Bienvenue à AIdeaText", + 'select_language': "Sélectionner la langue", + 'hello': "Bonjour", + 'chat_title': "Chat d'Analyse", + 'results_title': "Résultats de l'Analyse", + 'export_button': "Exporter l'Analyse Actuelle", + 'no_analysis': "Aucune analyse disponible. 
diff --git a/translations/fr.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b967d0963f34a4bda186b809eef2fc800eeef2 --- /dev/null +++ b/translations/fr.py @@ -0,0 +1,259 @@ +# translations/fr.py + +COMMON = { + 'activities_message':"Messages d'activités", + 'activities_placeholder':"Espace réservé aux activités", + 'analyze_button' : "Analyser", + 'analysis_placeholder':"Espace réservé à l'analyse", + 'analysis_types_chart' : "Graphique pour le type d'analyse", + 'analysis_from': "Analyse réalisée sur", + 'welcome': "Bienvenue à AIdeaText", + 'select_language': "Sélectionner la langue", + 'hello': "Bonjour", + 'chat_title': "Chat d'Analyse", + 'results_title': "Résultats de l'Analyse", + 'export_button': "Exporter l'Analyse Actuelle", + 'no_analysis': "Aucune analyse disponible. Utilisez le chat pour effectuer une analyse.", + 'export_success': "Analyse et chat exportés avec succès.", + 'export_error': "Un problème est survenu lors de l'exportation de l'analyse et du chat.", + 'nothing_to_export': "Aucune analyse ou chat à exporter.", + 'logout': "Déconnexion.", + 'loading_data': "Chargement des données", + 'get_text': "Obtenir du texte.", + 'student_activities':"Activités étudiantes", + 'total_analyses': "Analyses totales", + 'data_load_error': 'Erreur de chargement des données' +} + +TABS = { + 'morpho_tab': "Analyse Morphosyntaxique", + 'semantic_tab': "Analyse Sémantique", + 'discourse_tab': "Analyse du Discours", + 'activities_tab': "Mes Activités", + 'feedback_tab': "Formulaire d'Opinion" +} + +MORPHOSYNTACTIC = { + 'morpho_title': "AIdeaText - Analyse morphologique et syntaxique", + 'morpho_initial_message': "Il s'agit d'un chatbot à usage général, mais il possède une fonction spécifique pour l'analyse visuelle de texte : la génération de diagrammes d'arc. Pour les produire, entrez la commande /analisis_morfosintactico suivie du texte que vous souhaitez analyser entre crochets.", + 'morpho_input_label': "Entrez un texte à analyser (max 30 mots) :", + 'morphosyntactic_analysis_completed': 'Analyse morphosyntaxique terminée. Veuillez consulter les résultats dans la section suivante.', + # + 'analyze_button': "Analyser le texte", + 'repeated_words': "Mots répétés", + 'legend': "Légende : Catégories grammaticales", + 'arc_diagram': "Analyse syntaxique : Diagramme en arc", + 'sentence': "Phrase", + 'success_message': "Analyse enregistrée avec succès.", + 'error_message': "Un problème est survenu lors de l'enregistrement de l'analyse. Veuillez réessayer.", + 'warning_message': "Veuillez entrer un texte à analyser.", + # + 'no_results': "Aucun résultat disponible. Veuillez d'abord effectuer une analyse.", + 'pos_analysis': "Analyse des parties du discours", + 'morphological_analysis': "Analyse morphologique", + 'sentence_structure': "Structure des phrases", + 'word': "Mot", + 'count': "Nombre", + 'percentage': "Pourcentage", + 'examples': "Exemples", + 'lemma': "Lemme", + 'tag': "Étiquette", + 'dep': "Dépendance", + 'morph': "Morphologie", + 'root': "Racine", + 'subjects': "Sujets", + 'objects': "Objets", + 'verbs': "Verbes", + 'grammatical_category': "Catégorie grammaticale", + 'dependency': "Dépendance", + 'morphology': "Morphologie", + 'processing': 'Traitement en cours...', + 'error_processing': 'Erreur de traitement', + 'morphosyntactic_analysis_error': 'Erreur dans l\'analyse morphosyntaxique' +} + +SEMANTIC = { + # A + 'analysis_completed': "Analyse terminée", + 'analysis_section': "Analyse Sémantique", + 'analyze_document': 'Analyser le document', + 'analysis_saved_success': 'Analyse enregistrée avec succès', + 'analysis_save_error': 'Erreur lors de l\'enregistrement de l\'analyse', + 'analyze_button': "Analyser le texte", + 'analyzing_doc': "Analyse du document", + # C + 'chat_title': "Chat d'Analyse Sémantique", + 'chat_placeholder': "Posez une question ou utilisez une commande (/résumé, /entités, /sentiment, /thèmes, /graphe_concepts, /graphe_entités, /graphe_thèmes)", + 'clear_chat': "Effacer le chat", + 'conceptual_relations': "Relations Conceptuelles", + # D + 'delete_file': "Supprimer le fichier", + # E + 'error_message': "Un problème est survenu lors de l'enregistrement de l'analyse sémantique. 
Veuillez réessayer.", + # F + 'file_uploader': "Ou téléchargez un fichier texte", + 'file_upload_success': "Fichier téléchargé et enregistré avec succès", + 'file_upload_error': 'Erreur lors du téléchargement du fichier', + 'file_section': "Fichiers", + 'file_loaded_success': "Fichier chargé avec succès", + 'file_load_error': "Erreur lors du chargement du fichier", + 'file_upload_error': "Erreur lors du téléchargement et de l'enregistrement du fichier", + 'file_deleted_success': 'Fichier supprimé avec succès', + 'file_delete_error': 'Erreur lors de la suppression du fichier', + # G + 'graph_title': "Visualisation de l'Analyse Sémantique", + # I + 'identified_entities': "Entités Identifiées", + # K + 'key_concepts': "Concepts Clés", + # L + 'load_selected_file': 'Charger le fichier sélectionné', + # N + 'no_analysis': "Aucune analyse disponible. Veuillez télécharger ou sélectionner un fichier.", + 'no_results': "Aucun résultat disponible. Veuillez d'abord effectuer une analyse.", + 'no_file': "Veuillez télécharger un fichier pour commencer l'analyse.", + 'no_file_selected': "Veuillez sélectionner une archive pour démarrer l'analyse.", + # S + 'semantic_initial_message': "Ceci est un chatbot à usage général, mais il a une fonction spécifique pour l'analyse visuelle de textes : il génère un graphe avec les principales entités du texte. Pour le produire, entrez un fichier texte au format txt, pdf, doc, docx ou odt et appuyez sur le bouton 'analyser le fichier'. Après la génération du graphe, vous pouvez interagir avec le chat en fonction du document.", + 'semantic_title': "Analyse Sémantique", + 'send_button': "Envoyer", + 'select_saved_file': "Sélectionner un fichier enregistré", + 'success_message': "Analyse sémantique enregistrée avec succès.", + # T + 'text_input_label': "Entrez un texte à analyser (max. 5 000 mots) :", + 'text_input_placeholder': "L'objectif de cette application est d'améliorer vos compétences en rédaction...", + 'title': "AIdeaText - Analyse Sémantique", + # U + 'upload_file': "télécharger le fichier", + # W + 'warning_message': "Veuillez entrer un texte ou télécharger un fichier à analyser." +} + +DISCOURSE = { + 'discourse_title': "AIdeaText - Analyse du discours", + 'file_uploader1': "Télécharger le fichier texte 1 (Modèle)", + 'file_uploader2': "Télécharger le fichier texte 2 (Comparaison)", + 'analyze_button': "Analyser les textes", + 'comparison': "Comparaison des Relations Sémantiques", + 'success_message': "Analyse du discours enregistrée avec succès.", + 'error_message': "Un problème est survenu lors de l'enregistrement de l'analyse du discours. Veuillez réessayer.", + 'warning_message': "Veuillez télécharger les deux fichiers à analyser.", + 'discourse_initial_message': "C'est un chatbot de proposition générale, mais il a une fonction spécifique pour l'analyse visuelle des textes : générer des graphiques avec les principales entités de chaque fichier pour faire une comparaison entre plusieurs textes. Pour produire, insérer un premier fichier et l'autre après au format txt, pdf, doc, docx ou odt et appuyez sur le bouton 'analyser les archives'. Après la génération du graphique, vous pouvez interagir avec le chat en fonction du document.", + 'no_results': "Aucun résultat disponible. 
Veuillez d'abord effectuer une analyse.", + 'key_concepts': "Concepts Clés", + 'graph_not_available': "Le graphique n'est pas disponible.", + 'concepts_not_available': "Les concepts clés ne sont pas disponibles.", + 'comparison_not_available': "La comparaison n'est pas disponible.", + 'loading_data': 'Chargement des données de l\'étudiant...', + 'data_load_error': 'Erreur lors du chargement des données de l\'étudiant' +} + +ACTIVITIES = { + 'analysis_types_chart_title': "Types d'analyses effectuées", + 'analysis_types_chart_x': "Type d'analyse", + 'analysis_types_chart_y': "Nombre", + 'analysis_from': "Analyse du", + 'assistant': "Assistant", + 'activities_summary': "Résumé des Activités et Progrès", + 'chat_history_expander': "Historique des Conversations", + 'chat_from': "Conversation du", + 'combined_graph': "Graphique combiné", + 'conceptual_relations_graph': "Graphique des relations conceptuelles", + 'conversation': "Conversation", + 'discourse_analyses_expander': "Historique des Analyses de Discours", + 'discourse_analyses': "Analyses de Discours", + 'discourse_history': "Historique des Analyses de Discours", + 'document': "Document", + 'data_load_error': "Erreur lors du chargement des données de l'étudiant", + 'graph_display_error': "Impossible d'afficher le graphique", + 'graph_doc1': "Graphique document 1", + 'graph_doc2': "Graphique document 2", + 'key_concepts': "Concepts clés", + 'loading_data': "Chargement des données de l'étudiant...", + 'morphological_analysis': "Analyse Morphologique", + 'morphosyntax_analyses_expander': "Historique des Analyses Morphosyntaxiques", + 'morphosyntax_history': "Historique des Analyses Morphosyntaxiques", + 'no_arc_diagram': "Aucun diagramme en arc trouvé pour cette analyse.", + 'no_chat_history': "Aucune conversation avec le ChatBot n'a été trouvée.", + 'no_data_warning': "Aucune donnée d'analyse trouvée pour cet étudiant.", + 'progress_of': "Progrès de", + 'semantic_analyses': "Analyses Sémantiques", + 'semantic_analyses_expander': "Historique des Analyses Sémantiques", + 'semantic_history': "Historique des Analyses Sémantiques", + 'show_debug_data': "Afficher les données de débogage", + 'student_debug_data': "Données de l'étudiant (pour le débogage) :", + 'summary_title': "Résumé des Activités", + 'title': "Activités", + 'timestamp': "Horodatage", + 'total_analyses': "Total des analyses effectuées :", + 'try_analysis': "Essayez d'effectuer d'abord quelques analyses de texte.", + 'user': "Utilisateur" +} + +FEEDBACK = { + 'email': "E-mail", + 'feedback': "Retour", + 'feedback_title': "Formulaire de commentaires", + 'feedback_error': "Un problème est survenu lors de l'envoi du formulaire. Veuillez réessayer.", + 'feedback_success': "Merci pour votre réponse", + 'complete_all_fields': "Veuillez remplir tous les champs", + 'name': "Nom", + 'submit': "Envoyer" +} + + +TEXT_TYPES = { + "descriptif": [ + "Que décrivez-vous ?", + "Quelles sont ses principales caractéristiques ?", + "À quoi ressemble-t-il, quel son produit-il, quelle odeur dégage-t-il ou quelle sensation procure-t-il ?", + "Qu'est-ce qui le rend unique ou spécial ?" + ], + "narratif": [ + "Qui est le protagoniste ?", + "Où et quand se déroule l'histoire ?", + "Quel événement déclenche l'action ?", + "Que se passe-t-il ensuite ?", + "Comment se termine l'histoire ?" 
+ ], + "explicatif": [ + "Quel est le sujet principal ?", + "Quels aspects importants voulez-vous expliquer ?", + "Pouvez-vous donner des exemples ou des données pour appuyer votre explication ?", + "Comment ce sujet est-il lié à d'autres concepts ?" + ], + "argumentatif": [ + "Quelle est votre thèse ou argument principal ?", + "Quels sont vos arguments de soutien ?", + "Quelles preuves avez-vous pour étayer vos arguments ?", + "Quels sont les contre-arguments et comment les réfutez-vous ?", + "Quelle est votre conclusion ?" + ], + "instructif": [ + "Quelle tâche ou quel processus expliquez-vous ?", + "Quels matériaux ou outils sont nécessaires ?", + "Quelles sont les étapes à suivre ?", + "Y a-t-il des précautions ou des conseils importants à mentionner ?" + ], + "pitch": [ + "Quoi ?", + "Pour quoi ?", + "Pour qui ?", + "Comment ?" + ] + } + +# Configuration du modèle de langage pour le français +NLP_MODEL = 'fr_core_news_lg' + +# Esta línea es crucial: +TRANSLATIONS = { + 'COMMON': COMMON, + 'TABS': TABS, + 'MORPHOSYNTACTIC': MORPHOSYNTACTIC, + 'SEMANTIC': SEMANTIC, + 'DISCOURSE': DISCOURSE, + 'ACTIVITIES': ACTIVITIES, + 'FEEDBACK': FEEDBACK, + 'TEXT_TYPES': TEXT_TYPES, + 'NLP_MODEL': NLP_MODEL +} \ No newline at end of file diff --git a/translations/txt.txt b/translations/txt.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391