Upload 182 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- modules/__init__.py +319 -0
- modules/admin/__init__.py +0 -0
- modules/admin/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/admin/__pycache__/admin_ui.cpython-311.pyc +0 -0
- modules/admin/admin_ui.py +252 -0
- modules/admin/txt.txt +0 -0
- modules/chatbot/__init__.py +8 -0
- modules/chatbot/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/chatbot/__pycache__/chatbot.cpython-311.pyc +0 -0
- modules/chatbot/chabot.py +60 -0
- modules/chatbot/chat_interface.py +25 -0
- modules/chatbot/chat_process.py +56 -0
- modules/chatbot/chatbot-Old.py +46 -0
- modules/chatbot/chatbot_open_Source_Model-test.py +124 -0
- modules/chatbot/sidebar_chat.py +113 -0
- modules/chatbot/txt.txt +0 -0
- modules/discourse/__init__.py +17 -0
- modules/discourse/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/discourse/__pycache__/discourse_interface.cpython-311.pyc +0 -0
- modules/discourse/__pycache__/discourse_process.cpython-311.pyc +0 -0
- modules/discourse/discourse_interface.py +281 -0
- modules/discourse/discourse_live_interface.py +151 -0
- modules/discourse/discourse_process.py +68 -0
- modules/email/__init__.py +0 -0
- modules/email/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/email/__pycache__/email.cpython-311.pyc +0 -0
- modules/email/email.py +92 -0
- modules/email/txt.txt +0 -0
- modules/morphosyntax/__init__.py +29 -0
- modules/morphosyntax/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/morphosyntax/__pycache__/morphosyntax_interface.cpython-311.pyc +0 -0
- modules/morphosyntax/__pycache__/morphosyntax_process.cpython-311.pyc +0 -0
- modules/morphosyntax/morphosyntax_interface-Back1910-25-9-24.py +171 -0
- modules/morphosyntax/morphosyntax_interface-BackUp_Dec24_OK.py +322 -0
- modules/morphosyntax/morphosyntax_interface.py +228 -0
- modules/morphosyntax/morphosyntax_interface_BackUp_Dec-28-Ok.py +164 -0
- modules/morphosyntax/morphosyntax_interface_vOk-30-12-24.py +247 -0
- modules/morphosyntax/morphosyntax_process-Back1910-25-9-24.py +29 -0
- modules/morphosyntax/morphosyntax_process.py +132 -0
- modules/morphosyntax/morphosyntax_process_BackUp_Dec24_Ok.py +132 -0
- modules/morphosyntax/txt.txt +0 -0
- modules/semantic/__init_.py +17 -0
- modules/semantic/__pycache__/flexible_analysis_handler.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_float.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_float68ok.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_float86ok.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_float_reset.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_interface.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_interfaceBackUp_2092024_1800.cpython-311.pyc +0 -0
- modules/semantic/__pycache__/semantic_interfaceBorrados.cpython-311.pyc +0 -0
modules/__init__.py
ADDED
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/__init__.py
|
2 |
+
|
3 |
+
def load_auth_functions():
    """Load the authentication helpers from the auth module.

    Returns:
        dict: Mapping of public names to authentication callables.
    """
    from .auth.auth import (
        authenticate_student,
        register_student,
        update_student_info,
        delete_student,
    )

    auth_funcs = {
        'authenticate_student': authenticate_student,
        'register_student': register_student,
        'update_student_info': update_student_info,
        'delete_student': delete_student,
    }
    return auth_funcs
|
11 |
+
|
12 |
+
# Agregar nuevo import para current_situation
|
13 |
+
def load_current_situation_functions():
    """
    Load the functions related to the current-situation analysis.

    Returns:
        dict: Mapping of public names to current-situation callables.
    """
    from .studentact.current_situation_interface import (
        display_current_situation_interface,
        display_metrics_in_one_row,
        display_empty_metrics_row,
        display_metrics_analysis,
        display_comparison_results,
        display_metrics_and_suggestions,
        display_radar_chart,
        suggest_improvement_tools,
        prepare_metrics_config
    )

    from .studentact.current_situation_analysis import (
        correlate_metrics,
        analyze_text_dimensions,
        analyze_clarity,
        analyze_vocabulary_diversity,
        analyze_cohesion,
        analyze_structure,
        get_dependency_depths,
        normalize_score,
        generate_sentence_graphs,
        generate_word_connections,
        generate_connection_paths,
        create_vocabulary_network,
        create_syntax_complexity_graph,
        create_cohesion_heatmap
    )

    return {
        'display_current_situation_interface': display_current_situation_interface,
        # FIX: previous value referenced the undefined name
        # `display_metrics_in_one_line`; the imported function is
        # `display_metrics_in_one_row`.
        'display_metrics_in_one_row': display_metrics_in_one_row,
        # FIX: this key was duplicated later in the dict; kept once.
        'display_empty_metrics_row': display_empty_metrics_row,
        'display_metrics_analysis': display_metrics_analysis,
        'display_comparison_results': display_comparison_results,
        'display_metrics_and_suggestions': display_metrics_and_suggestions,
        'display_radar_chart': display_radar_chart,
        'suggest_improvement_tools': suggest_improvement_tools,
        'prepare_metrics_config': prepare_metrics_config,
        'correlate_metrics': correlate_metrics,
        'analyze_text_dimensions': analyze_text_dimensions,
        'analyze_clarity': analyze_clarity,
        'analyze_vocabulary_diversity': analyze_vocabulary_diversity,
        'analyze_cohesion': analyze_cohesion,
        'analyze_structure': analyze_structure,
        'get_dependency_depths': get_dependency_depths,
        'normalize_score': normalize_score,
        'generate_sentence_graphs': generate_sentence_graphs,
        'generate_word_connections': generate_word_connections,
        'generate_connection_paths': generate_connection_paths,
        'create_vocabulary_network': create_vocabulary_network,
        'create_syntax_complexity_graph': create_syntax_complexity_graph,
        'create_cohesion_heatmap': create_cohesion_heatmap
    }
|
74 |
+
|
75 |
+
def load_database_functions():
    """
    Load every database helper (connection init, SQL CRUD, MongoDB CRUD).

    Returns:
        dict: Mapping of public names to database callables.
    """
    from .database.database_init import (
        initialize_database_connections,
        get_container,
        get_mongodb
    )

    # SQL functions
    from .database.sql_db import (
        create_student_user,
        get_student_user,
        update_student_user,
        delete_student_user,
        store_application_request,
        store_student_feedback,
        record_login,
        record_logout,
        get_recent_sessions,
        get_user_total_time
    )

    from .database.mongo_db import (
        get_collection,
        insert_document,
        find_documents,
        update_document,
        delete_document,
    )

    from .database.morphosintax_mongo_db import (
        store_student_morphosyntax_result,
        get_student_morphosyntax_analysis,
        update_student_morphosyntax_analysis,
        delete_student_morphosyntax_analysis,
        get_student_morphosyntax_data
    )

    from .database.semantic_mongo_db import (
        store_student_semantic_result,
        get_student_semantic_analysis,
        update_student_semantic_analysis,
        delete_student_semantic_analysis,
        get_student_semantic_data
    )

    from .database.discourse_mongo_db import (
        store_student_discourse_result,
        get_student_discourse_analysis,
        update_student_discourse_analysis,
        delete_student_discourse_analysis,
        get_student_discourse_data
    )

    # New import for current_situation.
    # FIX: `get_recent_sessions` is aliased so it no longer shadows the
    # session-log function of the same name imported from sql_db above.
    from .database.current_situation_mongo_db import (
        store_current_situation_result,
        verify_storage,
        get_recent_sessions as get_recent_situation_sessions,
        get_student_situation_history,
        update_exercise_status
    )

    # New iterative morphosyntactic analysis functions.
    # FIX: the module lives inside the .database package (it was imported as
    # `.morphosyntax_iterative_mongo_db`), and the clashing names are aliased
    # so they do not shadow the base morphosintax_mongo_db functions above.
    from .database.morphosyntax_iterative_mongo_db import (
        store_student_morphosyntax_base,
        store_student_morphosyntax_iteration,
        get_student_morphosyntax_analysis as get_iterative_morphosyntax_analysis,
        update_student_morphosyntax_analysis as update_iterative_morphosyntax_analysis,
        delete_student_morphosyntax_analysis as delete_iterative_morphosyntax_analysis,
        get_student_morphosyntax_data as get_iterative_morphosyntax_data
    )

    from .database.chat_mongo_db import store_chat_history, get_chat_history

    return {
        # New iterative morphosyntax functions (keys renamed to avoid conflict)
        'store_student_morphosyntax_base': store_student_morphosyntax_base,
        'store_student_morphosyntax_iteration': store_student_morphosyntax_iteration,
        'get_student_morphosyntax_iterative_analysis': get_iterative_morphosyntax_analysis,
        'update_student_morphosyntax_iterative': update_iterative_morphosyntax_analysis,
        'delete_student_morphosyntax_iterative': delete_iterative_morphosyntax_analysis,
        'get_student_morphosyntax_iterative_data': get_iterative_morphosyntax_data,
        'store_current_situation_result': store_current_situation_result,
        'verify_storage': verify_storage,
        # Current-situation session history (distinct from the login-session log)
        'get_recent_situation_sessions': get_recent_situation_sessions,
        'get_student_situation_history': get_student_situation_history,
        'update_exercise_status': update_exercise_status,
        'initialize_database_connections': initialize_database_connections,
        'get_container': get_container,
        'get_mongodb': get_mongodb,
        'create_student_user': create_student_user,
        'get_student_user': get_student_user,
        'update_student_user': update_student_user,
        'delete_student_user': delete_student_user,
        'store_application_request': store_application_request,
        'store_student_feedback': store_student_feedback,
        'get_collection': get_collection,
        'insert_document': insert_document,
        'find_documents': find_documents,
        'update_document': update_document,
        'delete_document': delete_document,
        'store_student_morphosyntax_result': store_student_morphosyntax_result,
        # Base (non-iterative) morphosyntax functions — no longer shadowed
        'get_student_morphosyntax_analysis': get_student_morphosyntax_analysis,
        'update_student_morphosyntax_analysis': update_student_morphosyntax_analysis,
        'delete_student_morphosyntax_analysis': delete_student_morphosyntax_analysis,
        'get_student_morphosyntax_data': get_student_morphosyntax_data,
        'store_student_semantic_result': store_student_semantic_result,
        'get_student_semantic_analysis': get_student_semantic_analysis,
        'update_student_semantic_analysis': update_student_semantic_analysis,
        'delete_student_semantic_analysis': delete_student_semantic_analysis,
        'get_student_semantic_data': get_student_semantic_data,
        'store_chat_history': store_chat_history,
        'get_chat_history': get_chat_history,
        'store_student_discourse_result': store_student_discourse_result,
        'get_student_discourse_analysis': get_student_discourse_analysis,
        'update_student_discourse_analysis': update_student_discourse_analysis,
        'delete_student_discourse_analysis': delete_student_discourse_analysis,
        'get_student_discourse_data': get_student_discourse_data,
        'record_login': record_login,
        'record_logout': record_logout,
        # Login-session log from sql_db (used by the admin panel)
        'get_recent_sessions': get_recent_sessions,
        'get_user_total_time': get_user_total_time
    }
|
199 |
+
|
200 |
+
def load_ui_functions():
    """Placeholder loader: nothing from ui.py is imported here.

    Returns:
        dict: Always empty.
    """
    # Intentionally empty — the UI module is wired up elsewhere.
    return {}
|
203 |
+
|
204 |
+
def load_student_activities_v2_functions():
    """Load the v2 student-activities display helper.

    Returns:
        dict: Mapping exposing the activities display under
        the 'display_student_progress' key.
    """
    from .studentact.student_activities_v2 import display_student_activities

    return {'display_student_progress': display_student_activities}
|
209 |
+
|
210 |
+
def load_morphosyntax_functions():
    """
    Load the morphosyntactic interface and processing functions.

    Returns:
        dict: Mapping of public names to morphosyntax callables.
    """
    from .morphosyntax.morphosyntax_interface import (
        initialize_arc_analysis_state,
        reset_arc_analysis_state,
        display_arc_diagrams,
        display_morphosyntax_results
    )
    from .morphosyntax.morphosyntax_process import (
        process_morphosyntactic_input,
        format_analysis_results,
        perform_advanced_morphosyntactic_analysis
    )

    return {
        # Interface
        'initialize_arc_analysis_state': initialize_arc_analysis_state,
        # FIX: previous value was the undefined name `reset_morpho_state`.
        'reset_arc_analysis_state': reset_arc_analysis_state,
        'display_arc_diagrams': display_arc_diagrams,
        # FIX: previous entry referenced `display_morphosyntax_interface`,
        # which is never imported; the imported function is
        # `display_morphosyntax_results`.
        'display_morphosyntax_results': display_morphosyntax_results,
        # Process
        'process_morphosyntactic_input': process_morphosyntactic_input,
        'format_analysis_results': format_analysis_results,
        'perform_advanced_morphosyntactic_analysis': perform_advanced_morphosyntactic_analysis
    }
|
234 |
+
|
235 |
+
def load_semantic_functions():
    """
    Load the semantic-analysis interface and processing functions.

    Returns:
        dict: Mapping of public names to semantic callables.
    """
    from .semantic.semantic_interface import (
        display_semantic_interface,
        display_semantic_results
    )
    from modules.semantic.semantic_process import (
        process_semantic_input,
        format_semantic_results
    )

    return {
        'display_semantic_interface': display_semantic_interface,
        'display_semantic_results': display_semantic_results,
        'process_semantic_input': process_semantic_input,
        # FIX: previous value was `format_analysis_results`, a name that is
        # not defined in this function (it belongs to the morphosyntax
        # loader); the imported function is `format_semantic_results`.
        'format_semantic_results': format_semantic_results,
    }
|
251 |
+
|
252 |
+
|
253 |
+
def load_discourse_functions():
    """Load the discourse-analysis interface and processing functions.

    Returns:
        dict: Mapping of public names to discourse callables.
    """
    from .discourse.discourse_interface import (
        display_discourse_interface,
        display_discourse_results,
    )
    from modules.discourse.discourse_process import (
        perform_discourse_analysis,   # main analysis entry point
        extract_key_concepts,         # helper used by the interface
        generate_concept_graph,       # helper used by the interface
        calculate_similarity_matrix,  # helper used by the interface
    )

    discourse_funcs = {
        'display_discourse_interface': display_discourse_interface,
        'display_discourse_results': display_discourse_results,
        'perform_discourse_analysis': perform_discourse_analysis,
        'extract_key_concepts': extract_key_concepts,
        'generate_concept_graph': generate_concept_graph,
        'calculate_similarity_matrix': calculate_similarity_matrix,
    }
    return discourse_funcs
|
273 |
+
|
274 |
+
def load_admin_functions():
    """Load the admin panel entry point.

    Returns:
        dict: Mapping exposing the admin page renderer.
    """
    from .admin.admin_ui import admin_page

    return {'admin_page': admin_page}
|
279 |
+
|
280 |
+
def load_utils_functions():
    """Load shared utilities (spaCy model loading).

    Returns:
        dict: Mapping exposing the spaCy model loader.
    """
    from .utils.spacy_utils import load_spacy_models

    return {'load_spacy_models': load_spacy_models}
|
285 |
+
|
286 |
+
def load_chatbot_functions():
    """
    Load the chatbot module's public callables.

    Returns:
        dict: Mapping with the sidebar chat renderer and the chat processor.
    """
    from modules.chatbot.sidebar_chat import display_sidebar_chat
    from modules.chatbot.chat_process import ChatProcessor

    chatbot_funcs = {
        'display_sidebar_chat': display_sidebar_chat,
        'ChatProcessor': ChatProcessor,
    }
    return chatbot_funcs
|
304 |
+
|
305 |
+
# Función para cargar todas las funciones
|
306 |
+
def load_all_functions():
    """
    Aggregate every module loader into a single function registry.

    Returns:
        dict: Union of all loader dictionaries. Later loaders win on
        duplicate keys, matching dict-unpacking semantics.
    """
    return {
        **load_auth_functions(),
        **load_database_functions(),
        # **load_ui_functions(),  # intentionally disabled; returns {}
        **load_admin_functions(),
        **load_morphosyntax_functions(),
        **load_semantic_functions(),
        **load_discourse_functions(),
        **load_utils_functions(),
        **load_chatbot_functions(),
        # FIX: previously called `load_student_activities_functions()`,
        # which does not exist — the defined loader is the v2 variant.
        **load_student_activities_v2_functions(),
        **load_current_situation_functions()  # new current-situation loader
    }
|
modules/admin/__init__.py
ADDED
File without changes
|
modules/admin/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (147 Bytes). View file
|
|
modules/admin/__pycache__/admin_ui.cpython-311.pyc
ADDED
Binary file (3.07 kB). View file
|
|
modules/admin/admin_ui.py
ADDED
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/admin/admin_ui.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
|
5 |
+
from datetime import datetime
|
6 |
+
|
7 |
+
from ..database.sql_db import (
|
8 |
+
get_user,
|
9 |
+
get_student_user,
|
10 |
+
get_admin_user,
|
11 |
+
get_teacher_user,
|
12 |
+
create_student_user,
|
13 |
+
update_student_user,
|
14 |
+
delete_student_user,
|
15 |
+
record_login,
|
16 |
+
record_logout,
|
17 |
+
get_recent_sessions,
|
18 |
+
get_user_total_time
|
19 |
+
)
|
20 |
+
|
21 |
+
from ..database.morphosintax_mongo_db import get_student_morphosyntax_analysis
|
22 |
+
|
23 |
+
from ..auth.auth import hash_password # Agregar esta importación al inicio
|
24 |
+
|
25 |
+
########################################################
|
26 |
+
def format_duration(seconds):
    """Convert a duration in seconds to a human-readable "Xh Ym" string.

    Falsy input (None, 0) yields "0h 0m".
    """
    if not seconds:
        return "0h 0m"
    hours, remainder = divmod(seconds, 3600)
    minutes = remainder // 60
    return f"{hours}h {minutes}m"
|
33 |
+
|
34 |
+
|
35 |
+
|
36 |
+
########################################################
|
37 |
+
def admin_page():
    """Render the administration panel.

    Three tabs: student-user creation, user search (basic info, analyses,
    platform time) and recent platform activity, followed by a centered
    logout button.
    """
    st.title("Panel de Administración")
    st.write(f"Bienvenido, {st.session_state.username}")

    # Three tabs for the different sections
    tab1, tab2, tab3 = st.tabs([
        "Gestión de Usuarios",
        "Búsqueda de Usuarios",
        "Actividad de la Plataforma"
    ])

    ########################################################
    # Tab 1: user management
    with tab1:
        st.header("Crear Nuevo Usuario Estudiante")

        # Two columns for the creation form
        col1, col2 = st.columns(2)

        with col1:
            new_username = st.text_input(
                "Correo electrónico del nuevo usuario",
                key="admin_new_username"
            )

        with col2:
            new_password = st.text_input(
                "Contraseña",
                type="password",
                key="admin_new_password"
            )

        if st.button("Crear Usuario", key="admin_create_user", type="primary"):
            if new_username and new_password:  # both fields must have a value
                try:
                    # Hash the password before creating the user
                    hashed_password = hash_password(new_password)
                    if create_student_user(new_username, hashed_password, {'partitionKey': new_username}):
                        st.success(f"Usuario estudiante {new_username} creado exitosamente")
                    else:
                        st.error("Error al crear el usuario estudiante")
                except Exception as e:
                    st.error(f"Error al crear usuario: {str(e)}")
            else:
                st.warning("Por favor complete todos los campos")

    #######################################################################
    # Tab 2: user search
    with tab2:
        st.header("Búsqueda de Usuarios")

        search_col1, search_col2 = st.columns([2,1])

        with search_col1:
            student_username = st.text_input(
                "Nombre de usuario del estudiante",
                key="admin_view_student"
            )

        with search_col2:
            search_button = st.button(
                "Buscar",
                key="admin_view_student_data",
                type="primary"
            )

        if search_button:
            student = get_student_user(student_username)
            if student:
                # Sub-tabs for the different kinds of information
                info_tab1, info_tab2, info_tab3 = st.tabs([
                    "Información Básica",
                    "Análisis Realizados",
                    "Tiempo en Plataforma"
                ])

                with info_tab1:
                    st.subheader("Información del Usuario")
                    st.json(student)

                with info_tab2:
                    st.subheader("Análisis Realizados")
                    student_data = get_student_morphosyntax_analysis(student_username)
                    if student_data:
                        st.json(student_data)
                    else:
                        st.info("No hay datos de análisis para este estudiante.")

                with info_tab3:
                    st.subheader("Tiempo en Plataforma")
                    total_time = get_user_total_time(student_username)
                    if total_time:
                        st.metric(
                            "Tiempo Total",
                            format_duration(total_time)
                        )
                    else:
                        st.info("No hay registros de tiempo para este usuario")
            else:
                st.error("Estudiante no encontrado")

    #######################################################################
    # Tab 3: platform activity
    with tab3:
        st.header("Actividad Reciente")

        # Manual refresh button
        if st.button("Actualizar datos", key="refresh_sessions", type="primary"):
            st.rerun()

        # Show a spinner while loading session data
        with st.spinner("Cargando datos de sesiones..."):
            # Fetch recent sessions (20 entries for a fuller picture)
            recent_sessions = get_recent_sessions(20)

            if recent_sessions:
                # Build the rows shown in the dataframe
                sessions_data = []
                for session in recent_sessions:
                    try:
                        # Parse the login timestamp defensively; fall back to
                        # the raw value if the ISO format does not match.
                        try:
                            login_time = datetime.fromisoformat(
                                session['loginTime'].replace('Z', '+00:00')
                            ).strftime("%Y-%m-%d %H:%M:%S")
                        except Exception:
                            login_time = session['loginTime']

                        # logoutTime may be missing or "Activo" for live sessions
                        if session.get('logoutTime') and session['logoutTime'] != "Activo":
                            try:
                                logout_time = datetime.fromisoformat(
                                    session['logoutTime'].replace('Z', '+00:00')
                                ).strftime("%Y-%m-%d %H:%M:%S")
                            except Exception:
                                logout_time = session['logoutTime']
                        else:
                            logout_time = "Activo"

                        # Collect the display row
                        sessions_data.append({
                            "Usuario": session.get('username', 'Desconocido'),
                            "Inicio de Sesión": login_time,
                            "Fin de Sesión": logout_time,
                            "Duración": format_duration(session.get('sessionDuration', 0))
                        })
                    except Exception as e:
                        st.error(f"Error procesando sesión: {str(e)}")
                        continue

                # Debug information, collapsed by default
                with st.expander("Información de depuración", expanded=False):
                    st.write("Datos crudos recuperados:")
                    st.json(recent_sessions)

                    st.write("Datos procesados para mostrar:")
                    st.json(sessions_data)

                # Styled table of sessions
                st.dataframe(
                    sessions_data,
                    hide_index=True,
                    column_config={
                        "Usuario": st.column_config.TextColumn(
                            "Usuario",
                            width="medium"
                        ),
                        "Inicio de Sesión": st.column_config.TextColumn(
                            "Inicio de Sesión",
                            width="medium"
                        ),
                        "Fin de Sesión": st.column_config.TextColumn(
                            "Fin de Sesión",
                            width="medium"
                        ),
                        "Duración": st.column_config.TextColumn(
                            "Duración",
                            width="small"
                        )
                    }
                )

                # Summary metrics
                total_sessions = len(sessions_data)
                total_users = len(set(session['Usuario'] for session in sessions_data))

                metric_col1, metric_col2 = st.columns(2)
                with metric_col1:
                    st.metric("Total de Sesiones", total_sessions)
                with metric_col2:
                    st.metric("Usuarios Únicos", total_users)
            else:
                st.info("No hay registros de sesiones recientes o hubo un problema al recuperarlos.")

                # Debugging helper
                if st.button("Mostrar diagnóstico"):
                    st.write("Verificando la función get_recent_sessions:")
                    # FIX: get_container was never imported in this module,
                    # which made this branch raise NameError; import it
                    # locally from the database package.
                    from ..database.database_init import get_container
                    container = get_container("users_sessions")
                    if container:
                        st.success("✅ Conectado al contenedor users_sessions")
                    else:
                        st.error("❌ No se pudo conectar al contenedor users_sessions")

    ##################################################################
    # Divider before the logout button
    st.markdown("---")

    ##################################################################
    # Center the logout button
    col1, col2, col3 = st.columns([2,1,2])
    with col2:
        if st.button("Cerrar Sesión", key="admin_logout", type="primary", use_container_width=True):
            from ..auth.auth import logout
            logout()
            st.rerun()
|
modules/admin/txt.txt
ADDED
File without changes
|
modules/chatbot/__init__.py
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/chatbot/__init__.py
|
2 |
+
from .sidebar_chat import display_sidebar_chat
|
3 |
+
from .chat_process import ChatProcessor
|
4 |
+
|
5 |
+
__all__ = [
|
6 |
+
'display_sidebar_chat',
|
7 |
+
'ChatProcessor'
|
8 |
+
]
|
modules/chatbot/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (149 Bytes). View file
|
|
modules/chatbot/__pycache__/chatbot.cpython-311.pyc
ADDED
Binary file (3.3 kB). View file
|
|
modules/chatbot/chabot.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# chatbot/chatbot.py
|
2 |
+
import streamlit as st
|
3 |
+
from typing import Dict, List, Tuple
|
4 |
+
import logging
|
5 |
+
|
6 |
+
logger = logging.getLogger(__name__)
|
7 |
+
|
8 |
+
class AIdeaTextChatbot:
    """Conversational assistant state holder for AIdeaText.

    Keeps a per-instance conversation history and a small context dict
    (current analysis, last question, user profile). Intent analysis and
    response generation are stubs to be implemented.
    """

    def __init__(self, lang_code: str):
        self.lang_code = lang_code
        # List of (role, text) tuples in chronological order
        self.conversation_history = []
        self.context = {
            'current_analysis': None,
            'last_question': None,
            'user_profile': None
        }

    def process_message(self, message: str, context: Dict = None) -> str:
        """
        Process a user message and produce a response.

        Args:
            message: Raw user input.
            context: Optional extra context merged into self.context.

        Returns:
            The generated response (None while the generation stub is
            unimplemented), or a fallback message on error.
        """
        try:
            # Merge any caller-provided context
            if context:
                self.context.update(context)

            # Determine the intent of the message
            intent = self._analyze_intent(message)

            # Produce a response based on the intent
            response = self._generate_response(intent, message)

            # Record the exchange
            self._update_history(message, response)

            return response

        except Exception as e:
            logger.error(f"Error procesando mensaje: {str(e)}")
            return self._get_fallback_response()

    def _analyze_intent(self, message: str) -> str:
        """
        Analyze the intent of the user's message.
        """
        # TODO: implement intent analysis
        pass

    def _generate_response(self, intent: str, message: str) -> str:
        """
        Generate a response for the given intent.
        """
        # TODO: implement response generation
        pass

    def _update_history(self, message: str, response: str) -> None:
        """Append the (user, assistant) exchange to the history.

        FIX: this method was called by process_message but never defined,
        so every call hit the exception path.
        """
        self.conversation_history.append(("user", message))
        self.conversation_history.append(("assistant", response))

    def _get_fallback_response(self) -> str:
        """Generic reply used when message processing fails.

        FIX: referenced by process_message's error handler but previously
        undefined, which re-raised AttributeError from the handler itself.
        """
        return "Lo siento, ocurrió un error al procesar tu mensaje. Inténtalo de nuevo."

    def get_conversation_history(self) -> List[Tuple[str, str]]:
        """
        Return the conversation history as (role, text) tuples.
        """
        return self.conversation_history
|
modules/chatbot/chat_interface.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# chatbot/chat_interface.py
|
2 |
+
import streamlit as st
|
3 |
+
from .chatbot import AIdeaTextChatbot
|
4 |
+
|
5 |
+
def display_chat_interface(lang_code: str, chat_translations: dict):
    """
    Render the chat interface: history, input box and assistant reply.

    Args:
        lang_code: UI language code passed to the chatbot.
        chat_translations: Localized strings; 'chat_placeholder' is used
            for the input prompt.

    FIX: the annotation previously used `Dict` without importing it from
    typing, which raised NameError at import time; the builtin `dict` is
    used instead.
    NOTE(review): this module imports AIdeaTextChatbot from `.chatbot`,
    but the sibling file is named `chabot.py` — confirm the intended
    module name.
    """
    # Create the chatbot once per session
    if 'chatbot' not in st.session_state:
        st.session_state.chatbot = AIdeaTextChatbot(lang_code)

    # Replay the conversation history
    for msg in st.session_state.chatbot.get_conversation_history():
        with st.chat_message(msg[0]):
            st.write(msg[1])

    # Read user input
    if prompt := st.chat_input(chat_translations.get('chat_placeholder', 'Escribe tu mensaje...')):
        # Process the message
        response = st.session_state.chatbot.process_message(prompt)

        # Display the assistant's reply
        with st.chat_message("assistant"):
            st.write(response)
|
modules/chatbot/chat_process.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/chatbot/chat_process.py
|
2 |
+
import os
|
3 |
+
import anthropic
|
4 |
+
import logging
|
5 |
+
from typing import Dict, Generator
|
6 |
+
|
7 |
+
logger = logging.getLogger(__name__)
|
8 |
+
|
9 |
+
####################################################
|
10 |
+
class ChatProcessor:
    """Chat processor backed by Anthropic's Claude Messages API."""

    def __init__(self):
        """Set up the Claude client; requires ANTHROPIC_API_KEY in the environment."""
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key:
            raise ValueError("No se encontró la clave API de Anthropic. Asegúrate de configurarla en las variables de entorno.")
        self.client = anthropic.Anthropic(api_key=api_key)
        self.conversation_history = []

    def process_chat_input(self, message: str, lang_code: str) -> Generator[str, None, None]:
        """Send the message to Claude and stream the reply back one word at a time."""
        try:
            # Record the user turn before calling the API
            self.conversation_history.append({"role": "user", "content": message})

            # Request a completion over the running conversation
            response = self.client.messages.create(
                model="claude-3-5-sonnet-20241022",
                messages=self.conversation_history,
                max_tokens=8000,  # required parameter of the Messages API
                temperature=0.7,
            )

            claude_response = response.content[0].text
            self.conversation_history.append(
                {"role": "assistant", "content": claude_response}
            )

            # Keep only the 10 most recent turns
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]

            # Emit the reply incrementally so the UI can render it as it arrives
            for word in claude_response.split():
                yield word + " "

        except Exception as e:
            logger.error(f"Error en process_chat_input: {str(e)}")
            yield f"Error: {str(e)}"

    def get_conversation_history(self) -> list:
        """Return the (possibly truncated) conversation history."""
        return self.conversation_history

    def clear_history(self):
        """Drop every recorded conversation turn."""
        self.conversation_history = []
|
modules/chatbot/chatbot-Old.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from ..text_analysis.morpho_analysis import perform_advanced_morphosyntactic_analysis
|
3 |
+
from ..text_analysis.semantic_analysis import perform_semantic_analysis
|
4 |
+
from ..text_analysis.discourse_analysis import perform_discourse_analysis
|
5 |
+
|
6 |
+
class AIdeaTextChatbot:
    """Legacy chatbot that routes slash commands to the text-analysis helpers."""

    def __init__(self):
        # Chronological record of the conversation
        self.conversation_history = []

    def handle_morphosyntactic_input(self, user_input, lang_code, nlp_models, t):
        """
        Handle a message while in morphosyntactic-analysis mode.

        Args:
            user_input: Raw message; '/analisis_morfosintactico [text]'
                triggers an analysis of the bracketed text.
            lang_code: Language key into nlp_models.
            nlp_models: Dict of loaded spaCy models.
            t: Translation dict for status messages.

        Returns:
            (status_message, arc_diagrams_or_None, full_result_or_None)
        """
        if user_input.startswith('/analisis_morfosintactico'):
            # NOTE(review): this raises IndexError if the brackets are
            # missing — confirm callers always send '[text]'.
            text_to_analyze = user_input.split('[', 1)[1].rsplit(']', 1)[0]
            result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code])
            if result is None or 'arc_diagrams' not in result:
                return t.get('morphosyntactic_analysis_error', 'Error en el análisis morfosintáctico'), None, None
            return t.get('morphosyntactic_analysis_completed', 'Análisis morfosintáctico completado'), result['arc_diagrams'], result
        else:
            # Fix: the original called self.generate_response, which does not
            # exist on this class (AttributeError at runtime); the method
            # actually defined below is handle_generate_response.
            return self.handle_generate_response(user_input, lang_code), None, None

    def handle_semantic_input(self, user_input, lang_code, nlp_models, t):
        """Handle a message in semantic-analysis mode (not implemented)."""
        pass

    def handle_discourse_input(self, user_input, lang_code, nlp_models, t):
        """Handle a message in discourse-analysis mode (not implemented)."""
        pass

    def handle_generate_response(self, prompt, lang_code):
        """Generate a general chatbot reply (not implemented; returns None)."""
        # The Claude API could be plugged in here.
        pass
|
34 |
+
|
35 |
+
def initialize_chatbot():
    """Create a fresh AIdeaTextChatbot instance."""
    return AIdeaTextChatbot()
|
37 |
+
|
38 |
+
def process_chat_input(user_input, lang_code, nlp_models, analysis_type, t, file_contents=None):
    """
    Route a chat message to the per-session chatbot.

    Args:
        user_input: Raw message text.
        lang_code: Language key into nlp_models.
        nlp_models: Dict of loaded spaCy models.
        analysis_type: Which handler to dispatch to (only
            'morphosyntactic' is handled in the visible code).
        t: Translation dict passed through to the handler.
        file_contents: Unused in the visible code.

    Returns:
        The handler's result, or None when analysis_type is not handled
        (the function falls through with no return).
    """
    # Lazily create and cache the chatbot in the Streamlit session
    chatbot = st.session_state.get('aideatext_chatbot')
    if not chatbot:
        chatbot = initialize_chatbot()
        st.session_state.aideatext_chatbot = chatbot

    if analysis_type == 'morphosyntactic':
        return chatbot.handle_morphosyntactic_input(user_input, lang_code, nlp_models, t)
    # ... handle other analysis types ...
|
modules/chatbot/chatbot_open_Source_Model-test.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
2 |
+
import torch
|
3 |
+
from torch.optim import Adam
|
4 |
+
from torch.utils.data import DataLoader, Dataset
|
5 |
+
import json
|
6 |
+
import tqdm
|
7 |
+
|
8 |
+
# Base GPT-2 tokenizer/model, loaded eagerly at import time.
# NOTE(review): these module-level objects are not referenced by the classes
# below, which load their own per-language models — confirm before removing.
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")
|
10 |
+
|
11 |
+
class MultilingualChatData(Dataset):
    """
    Torch Dataset over a JSON list of {"input": ..., "output": ...} pairs,
    serialized as '<startofstring> IN <bot>: OUT <endofstring>' and tokenized
    to fixed-length tensors.
    """

    def __init__(self, file_path, tokenizer, max_length=512):
        with open(file_path, 'r', encoding='utf-8') as fh:
            self.data = json.load(fh)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        text = f"<startofstring> {sample['input']} <bot>: {sample['output']} <endofstring>"
        encoded = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors="pt",
        )
        # Drop the leading batch dimension added by return_tensors="pt"
        return encoded['input_ids'].squeeze(), encoded['attention_mask'].squeeze()
|
26 |
+
|
27 |
+
class MultilingualChatbot:
    """GPT-2-based chatbot with separate models/tokenizers for en, es and fr."""

    def __init__(self):
        # One pretrained conversational model per supported language
        self.models = {
            'en': GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium"),
            'es': GPT2LMHeadModel.from_pretrained("DeepESP/gpt2-spanish"),
            'fr': GPT2LMHeadModel.from_pretrained("asi/gpt-fr-cased-small")
        }
        self.tokenizers = {
            'en': GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium"),
            'es': GPT2Tokenizer.from_pretrained("DeepESP/gpt2-spanish"),
            'fr': GPT2Tokenizer.from_pretrained("asi/gpt-fr-cased-small")
        }
        # Register the chat-framing special tokens on every tokenizer
        for tokenizer in self.tokenizers.values():
            tokenizer.pad_token = tokenizer.eos_token
            tokenizer.add_special_tokens({
                "bos_token": "<startofstring>",
                "eos_token": "<endofstring>"
            })
            tokenizer.add_tokens(["<bot>:"])

        # Fix: resize each model to the vocabulary size of ITS OWN tokenizer.
        # The original resized every model to len(self.tokenizers['en']),
        # but the Spanish/French tokenizers have different vocabulary sizes,
        # leaving those models with mismatched embedding tables.
        for lang, model in self.models.items():
            model.resize_token_embeddings(len(self.tokenizers[lang]))

        # Prefer CUDA, then Apple MPS, then CPU
        self.device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
        for model in self.models.values():
            model.to(self.device)

    def train(self, lang, data_file, epochs=5, batch_size=32, learning_rate=1e-4):
        """
        Fine-tune the model for `lang` on a JSON chat dataset.

        Args:
            lang: Language key ('en' | 'es' | 'fr').
            data_file: Path to the JSON file consumed by MultilingualChatData.
            epochs, batch_size, learning_rate: Usual training knobs.

        Side effects:
            Saves the fine-tuned weights to model_state_<lang>.pt.
        """
        model = self.models[lang]
        tokenizer = self.tokenizers[lang]

        chat_data = MultilingualChatData(data_file, tokenizer)
        data_loader = DataLoader(chat_data, batch_size=batch_size, shuffle=True)

        optimizer = Adam(model.parameters(), lr=learning_rate)

        model.train()
        for epoch in range(epochs):
            total_loss = 0
            for batch in tqdm.tqdm(data_loader, desc=f"Epoch {epoch+1}/{epochs}"):
                input_ids, attention_mask = [b.to(self.device) for b in batch]

                optimizer.zero_grad()
                # Causal-LM objective: the labels are the inputs themselves
                outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)
                loss = outputs.loss
                loss.backward()
                optimizer.step()

                total_loss += loss.item()

            print(f"Epoch {epoch+1}/{epochs}, Loss: {total_loss/len(data_loader):.4f}")

        torch.save(model.state_dict(), f"model_state_{lang}.pt")

    def generate_response(self, prompt, src_lang):
        """
        Generate a reply to `prompt` using the model for `src_lang`
        (falls back to the English model for unknown language codes).
        """
        model = self.models.get(src_lang, self.models['en'])
        tokenizer = self.tokenizers.get(src_lang, self.tokenizers['en'])

        input_text = f"<startofstring> {prompt} <bot>: "
        input_ids = tokenizer.encode(input_text, return_tensors='pt').to(self.device)

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=self.device)

        output = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_length=1000,
            pad_token_id=tokenizer.eos_token_id,
            no_repeat_ngram_size=3,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            num_return_sequences=1,
            length_penalty=1.0,
            repetition_penalty=1.2
        )

        decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
        # Keep only the text after the last "<bot>:" marker
        return decoded_output.split("<bot>:")[-1].strip()
|
107 |
+
|
108 |
+
def initialize_chatbot():
    """Create a MultilingualChatbot (downloads/loads all per-language models)."""
    return MultilingualChatbot()
|
110 |
+
|
111 |
+
def get_chatbot_response(chatbot, prompt, src_lang):
    """Thin wrapper: delegate reply generation to the chatbot instance."""
    return chatbot.generate_response(prompt, src_lang)
|
113 |
+
|
114 |
+
# Usage example
if __name__ == "__main__":
    chatbot = initialize_chatbot()

    # Train the Spanish model (assumes a Spanish chat-data file exists)
    chatbot.train('es', './spanish_chat_data.json', epochs=3)

    # Generate replies in each supported language
    print(get_chatbot_response(chatbot, "Hola, ¿cómo estás?", 'es'))
    print(get_chatbot_response(chatbot, "Hello, how are you?", 'en'))
    print(get_chatbot_response(chatbot, "Bonjour, comment allez-vous?", 'fr'))
|
modules/chatbot/sidebar_chat.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/chatbot/sidebar_chat.py
|
2 |
+
import streamlit as st
|
3 |
+
from .chat_process import ChatProcessor
|
4 |
+
from ..database.chat_mongo_db import store_chat_history, get_chat_history
|
5 |
+
import logging
|
6 |
+
|
7 |
+
logger = logging.getLogger(__name__)
|
8 |
+
|
9 |
+
def display_sidebar_chat(lang_code: str, chatbot_t: dict):
    """
    Render the chatbot inside the Streamlit sidebar.

    Args:
        lang_code: Language code forwarded to the chat processor.
        chatbot_t: Translation dict for the chatbot UI; merged over
            English defaults so missing keys never raise.
    """
    # Ensure all required translation keys exist
    default_translations = {
        'error_message': 'An error occurred',
        'expand_chat': 'Open Assistant',
        'initial_message': 'Hi! How can I help?',
        'input_placeholder': 'Type your message...',
        'clear_chat': 'Clear chat'
    }

    # Provided translations override the defaults
    translations = {**default_translations, **chatbot_t}

    with st.sidebar:
        # Collapsible chatbot panel
        with st.expander(translations['expand_chat'], expanded=False):
            try:
                # Create the chat processor once per session
                if 'chat_processor' not in st.session_state:
                    try:
                        st.session_state.chat_processor = ChatProcessor()
                    except Exception as e:
                        logger.error(f"Error inicializando ChatProcessor: {str(e)}")
                        st.error("Error: No se pudo inicializar el chat. Verifica la configuración.")
                        return

                # Seed the message list on first use
                if 'sidebar_messages' not in st.session_state:
                    # Try to restore a previous history from the database
                    try:
                        history = get_chat_history(st.session_state.username, 'sidebar', 10)
                        if history:
                            st.session_state.sidebar_messages = history[0]['messages']
                        else:
                            st.session_state.sidebar_messages = [
                                {"role": "assistant", "content": translations['initial_message']}
                            ]
                    except Exception as e:
                        # Fall back to a fresh conversation if retrieval fails
                        logger.error(f"Error recuperando historial: {str(e)}")
                        st.session_state.sidebar_messages = [
                            {"role": "assistant", "content": translations['initial_message']}
                        ]

                # Container holding the rendered conversation
                chat_container = st.container()

                # Render the existing messages
                with chat_container:
                    for message in st.session_state.sidebar_messages:
                        with st.chat_message(message["role"]):
                            st.markdown(message["content"])

                # User input box
                user_input = st.text_input(
                    translations['input_placeholder'],
                    key='sidebar_chat_input'
                )

                if user_input:
                    # Record the user's message
                    st.session_state.sidebar_messages.append(
                        {"role": "user", "content": user_input}
                    )

                    # Stream the assistant's reply into a placeholder
                    with chat_container:
                        with st.chat_message("assistant"):
                            message_placeholder = st.empty()
                            full_response = ""

                            for chunk in st.session_state.chat_processor.process_chat_input(
                                user_input,
                                lang_code
                            ):
                                full_response += chunk
                                message_placeholder.markdown(full_response)

                    # Record the completed reply
                    st.session_state.sidebar_messages.append(
                        {"role": "assistant", "content": full_response.strip()}
                    )

                    # Persist the conversation
                    store_chat_history(
                        username=st.session_state.username,
                        messages=st.session_state.sidebar_messages,
                        analysis_type='sidebar'  # tag this as the sidebar chat
                    )

                # Reset button: restore the initial greeting and rerun
                if st.button(translations['clear_chat']):
                    st.session_state.sidebar_messages = [
                        {"role": "assistant", "content": translations['initial_message']}
                    ]
                    st.rerun()

            except Exception as e:
                logger.error(f"Error en sidebar chat: {str(e)}")
                st.error(translations['error_message'])
|
modules/chatbot/txt.txt
ADDED
File without changes
|
modules/discourse/__init__.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# En /modules/discourse/__init__.py
|
2 |
+
|
3 |
+
from ..database.discourse_mongo_db import (
|
4 |
+
store_student_discourse_result,
|
5 |
+
get_student_discourse_analysis,
|
6 |
+
update_student_discourse_analysis,
|
7 |
+
delete_student_discourse_analysis,
|
8 |
+
get_student_discourse_data
|
9 |
+
)
|
10 |
+
|
11 |
+
__all__ = [
|
12 |
+
'store_student_discourse_result',
|
13 |
+
'get_student_discourse_analysis',
|
14 |
+
'update_student_discourse_analysis',
|
15 |
+
'delete_student_discourse_analysis',
|
16 |
+
'get_student_discourse_data'
|
17 |
+
]
|
modules/discourse/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (151 Bytes). View file
|
|
modules/discourse/__pycache__/discourse_interface.cpython-311.pyc
ADDED
Binary file (1.62 kB). View file
|
|
modules/discourse/__pycache__/discourse_process.cpython-311.pyc
ADDED
Binary file (1.58 kB). View file
|
|
modules/discourse/discourse_interface.py
ADDED
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/discourse/discourse/discourse_interface.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
import pandas as pd
|
5 |
+
import plotly.graph_objects as go
|
6 |
+
import logging
|
7 |
+
from ..utils.widget_utils import generate_unique_key
|
8 |
+
from .discourse_process import perform_discourse_analysis
|
9 |
+
from ..database.chat_mongo_db import store_chat_history
|
10 |
+
from ..database.discourse_mongo_db import store_student_discourse_result
|
11 |
+
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
+
|
14 |
+
def display_discourse_interface(lang_code, nlp_models, discourse_t):
    """
    Discourse-analysis interface: upload two .txt files, run a comparative
    analysis, persist the result, and render it.

    Args:
        lang_code: Current UI language code (selects the spaCy model).
        nlp_models: Dict of loaded spaCy models keyed by language code.
        discourse_t: Translation dict for this interface.
    """
    try:
        # 1. Initialize per-session state on first use
        if 'discourse_state' not in st.session_state:
            st.session_state.discourse_state = {
                'analysis_count': 0,
                'last_analysis': None,
                'current_files': None
            }

        # 2. Title and instructions
        st.subheader(discourse_t.get('discourse_title', 'Análisis del Discurso'))
        st.info(discourse_t.get('initial_instruction',
            'Cargue dos archivos de texto para realizar un análisis comparativo del discurso.'))

        # 3. File-upload area (analysis_count in the widget key forces fresh
        #    uploaders after each completed analysis)
        col1, col2 = st.columns(2)
        with col1:
            st.markdown(discourse_t.get('file1_label', "**Documento 1 (Patrón)**"))
            uploaded_file1 = st.file_uploader(
                discourse_t.get('file_uploader1', "Cargar archivo 1"),
                type=['txt'],
                key=f"discourse_file1_{st.session_state.discourse_state['analysis_count']}"
            )

        with col2:
            st.markdown(discourse_t.get('file2_label', "**Documento 2 (Comparación)**"))
            uploaded_file2 = st.file_uploader(
                discourse_t.get('file_uploader2', "Cargar archivo 2"),
                type=['txt'],
                key=f"discourse_file2_{st.session_state.discourse_state['analysis_count']}"
            )

        # 4. Analyze button (disabled until both files are present)
        col1, col2, col3 = st.columns([1,2,1])
        with col1:
            analyze_button = st.button(
                discourse_t.get('discourse_analyze_button', 'Analizar Discurso'),
                key=generate_unique_key("discourse", "analyze_button"),
                type="primary",
                icon="🔍",
                disabled=not (uploaded_file1 and uploaded_file2),
                use_container_width=True
            )

        # 5. Run the analysis
        if analyze_button and uploaded_file1 and uploaded_file2:
            try:
                with st.spinner(discourse_t.get('processing', 'Procesando análisis...')):
                    # Read file contents (assumes UTF-8; a decode error is
                    # caught by the except below)
                    text1 = uploaded_file1.getvalue().decode('utf-8')
                    text2 = uploaded_file2.getvalue().decode('utf-8')

                    # Run the comparative analysis
                    result = perform_discourse_analysis(
                        text1,
                        text2,
                        nlp_models[lang_code],
                        lang_code
                    )

                    if result['success']:
                        # Cache the result and bump the widget-key counter
                        st.session_state.discourse_result = result
                        st.session_state.discourse_state['analysis_count'] += 1
                        st.session_state.discourse_state['current_files'] = (
                            uploaded_file1.name,
                            uploaded_file2.name
                        )

                        # Persist to the database
                        if store_student_discourse_result(
                            st.session_state.username,
                            text1,
                            text2,
                            result
                        ):
                            st.success(discourse_t.get('success_message', 'Análisis guardado correctamente'))

                            # Render the results
                            display_discourse_results(result, lang_code, discourse_t)
                        else:
                            st.error(discourse_t.get('error_message', 'Error al guardar el análisis'))
                    else:
                        st.error(discourse_t.get('analysis_error', 'Error en el análisis'))

            except Exception as e:
                logger.error(f"Error en análisis del discurso: {str(e)}")
                st.error(discourse_t.get('error_processing', f'Error procesando archivos: {str(e)}'))

        # 6. Otherwise re-render the previous result, if one exists
        elif 'discourse_result' in st.session_state and st.session_state.discourse_result is not None:
            if st.session_state.discourse_state.get('current_files'):
                st.info(
                    discourse_t.get('current_analysis_message', 'Mostrando análisis de los archivos: {} y {}')
                    .format(*st.session_state.discourse_state['current_files'])
                )
            display_discourse_results(
                st.session_state.discourse_result,
                lang_code,
                discourse_t
            )

    except Exception as e:
        logger.error(f"Error general en interfaz del discurso: {str(e)}")
        st.error(discourse_t.get('general_error', 'Se produjo un error. Por favor, intente de nuevo.'))
|
127 |
+
|
128 |
+
|
129 |
+
|
130 |
+
#####################################################################################################################
|
131 |
+
|
132 |
+
def _render_discourse_document(result, doc_num, title_key, discourse_t):
    """
    Render one document column: key-concept chips, the concept graph,
    a download button, and the interpretation notes.

    Args:
        result: Analysis result dict; keys 'key_concepts{doc_num}',
            'graph{doc_num}' and 'graph{doc_num}_bytes' are consumed.
        doc_num: 1 or 2 — selects which document's result keys to read.
        title_key: Translation key for the column title.
        discourse_t: Translation dict.
    """
    concepts_key = f'key_concepts{doc_num}'
    graph_key = f'graph{doc_num}'

    st.subheader(discourse_t.get(title_key, f'Documento {doc_num}'))
    st.markdown(discourse_t.get('key_concepts', 'Conceptos Clave'))
    if concepts_key in result:
        # Concept chips rendered as inline HTML (styled by the CSS injected
        # by display_discourse_results)
        concepts_html = f"""
        <div class="concepts-container">
            {''.join([
                f'<div class="concept-item"><span class="concept-name">{concept}</span>'
                f'<span class="concept-freq">({freq:.2f})</span></div>'
                for concept, freq in result[concepts_key]
            ])}
        </div>
        """
        st.markdown(concepts_html, unsafe_allow_html=True)

        if graph_key in result:
            st.markdown('<div class="graph-container">', unsafe_allow_html=True)
            st.pyplot(result[graph_key])

            # Download button for the rendered graph
            button_col, spacer_col = st.columns([1,4])
            with button_col:
                if f'{graph_key}_bytes' in result:
                    st.download_button(
                        label="📥 " + discourse_t.get('download_graph', "Download"),
                        data=result[f'{graph_key}_bytes'],
                        file_name=f"discourse_graph{doc_num}.png",
                        mime="image/png",
                        use_container_width=True
                    )

            # Interpretation shown as plain text (no expander)
            st.markdown("**📊 Interpretación del grafo:**")
            st.markdown("""
            - 🔀 Las flechas indican la dirección de la relación entre conceptos
            - 🎨 Los colores más intensos indican conceptos más centrales en el texto
            - ⭕ El tamaño de los nodos representa la frecuencia del concepto
            - ↔️ El grosor de las líneas indica la fuerza de la conexión
            """)

            st.markdown('</div>', unsafe_allow_html=True)
        else:
            st.warning(discourse_t.get('graph_not_available', 'Gráfico no disponible'))
    else:
        st.warning(discourse_t.get('concepts_not_available', 'Conceptos no disponibles'))


def display_discourse_results(result, lang_code, discourse_t):
    """
    Display the discourse-analysis results for both documents side by side.

    Refactor: the two document columns were byte-for-byte copy-paste; they
    now share _render_discourse_document so styling and behavior cannot
    drift between them.  Rendering output is unchanged.
    """
    if not result.get('success'):
        st.warning(discourse_t.get('no_results', 'No hay resultados disponibles'))
        return

    # Shared CSS for the concept chips and graph containers
    st.markdown("""
    <style>
        .concepts-container {
            display: flex;
            flex-wrap: nowrap;
            gap: 8px;
            padding: 12px;
            background-color: #f8f9fa;
            border-radius: 8px;
            overflow-x: auto;
            margin-bottom: 15px;
            white-space: nowrap;
        }
        .concept-item {
            background-color: white;
            border-radius: 4px;
            padding: 6px 10px;
            display: inline-flex;
            align-items: center;
            gap: 4px;
            box-shadow: 0 1px 2px rgba(0,0,0,0.1);
            flex-shrink: 0;
        }
        .concept-name {
            font-weight: 500;
            color: #1f2937;
            font-size: 0.85em;
        }
        .concept-freq {
            color: #6b7280;
            font-size: 0.75em;
        }
        .graph-container {
            background-color: white;
            padding: 15px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            margin-top: 10px;
        }
    </style>
    """, unsafe_allow_html=True)

    col1, col2 = st.columns(2)

    # Document 1 (pattern)
    with col1:
        _render_discourse_document(result, 1, 'doc1_title', discourse_t)

    # Document 2 (comparison)
    with col2:
        _render_discourse_document(result, 2, 'doc2_title', discourse_t)

    # Informational note about the upcoming comparison feature
    st.info(discourse_t.get('comparison_note',
        'La funcionalidad de comparación detallada estará disponible en una próxima actualización.'))
|
modules/discourse/discourse_live_interface.py
ADDED
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/discourse/discourse/discourse_live_interface.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
from streamlit_float import *
|
5 |
+
from streamlit_antd_components import *
|
6 |
+
import pandas as pd
|
7 |
+
import logging
|
8 |
+
import io
|
9 |
+
import matplotlib.pyplot as plt
|
10 |
+
|
11 |
+
# Configuración del logger
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
+
|
14 |
+
# Importaciones locales
|
15 |
+
from .discourse_process import perform_discourse_analysis
|
16 |
+
from .discourse_interface import display_discourse_results # Añadida esta importación
|
17 |
+
from ..utils.widget_utils import generate_unique_key
|
18 |
+
from ..database.discourse_mongo_db import store_student_discourse_result
|
19 |
+
from ..database.chat_mongo_db import store_chat_history, get_chat_history
|
20 |
+
|
21 |
+
|
22 |
+
#####################################################################################################
|
23 |
+
def fig_to_bytes(fig):
    """
    Serialize a matplotlib figure to PNG bytes.

    Args:
        fig: Object exposing matplotlib's Figure.savefig interface.

    Returns:
        bytes with the PNG payload, or None if serialization fails
        (the error is logged).
    """
    try:
        with io.BytesIO() as buffer:
            fig.savefig(buffer, format='png', dpi=300, bbox_inches='tight')
            # getvalue() returns the full buffer regardless of position,
            # so no explicit seek(0) is needed
            return buffer.getvalue()
    except Exception as e:
        logger.error(f"Error en fig_to_bytes: {str(e)}")
        return None
|
33 |
+
|
34 |
+
#################################################################################################
|
35 |
+
def display_discourse_live_interface(lang_code, nlp_models, discourse_t):
    """
    Live discourse-analysis interface with an improved two-column layout.

    Renders two text areas (pattern text and comparison text), an analyze
    button, and either the fresh analysis results or the previously stored
    ones. Results are persisted per-user via store_student_discourse_result.

    Args:
        lang_code: language code used to pick the spaCy model from nlp_models.
        nlp_models: mapping of language code -> loaded NLP model.
        discourse_t: translation dict for UI strings (uses .get fallbacks).
    """
    try:
        # Lazily create the per-session UI state on first render.
        if 'discourse_live_state' not in st.session_state:
            st.session_state.discourse_live_state = {
                'analysis_count': 0,
                'current_text1': '',
                'current_text2': '',
                'last_result': None,
                'text_changed': False
            }

        # Title
        st.subheader(discourse_t.get('enter_text', 'Ingrese sus textos'))

        # Text entry area in two columns
        text_col1, text_col2 = st.columns(2)

        # Text 1 (pattern)
        with text_col1:
            st.markdown("**Texto 1 (Patrón)**")
            text_input1 = st.text_area(
                "Texto 1",
                height=200,
                key="discourse_live_text1",
                value=st.session_state.discourse_live_state.get('current_text1', ''),
                label_visibility="collapsed"
            )
            # Mirror the widget value into session state so it survives reruns.
            st.session_state.discourse_live_state['current_text1'] = text_input1

        # Text 2 (comparison)
        with text_col2:
            st.markdown("**Texto 2 (Comparación)**")
            text_input2 = st.text_area(
                "Texto 2",
                height=200,
                key="discourse_live_text2",
                value=st.session_state.discourse_live_state.get('current_text2', ''),
                label_visibility="collapsed"
            )
            st.session_state.discourse_live_state['current_text2'] = text_input2

        # Centered analyze button (enabled only when both texts are present)
        col1, col2, col3 = st.columns([1,2,1])
        with col1:
            analyze_button = st.button(
                discourse_t.get('analyze_button', 'Analizar'),
                key="discourse_live_analyze",
                type="primary",
                icon="🔍",
                disabled=not (text_input1 and text_input2),
                use_container_width=True
            )

        # Run the analysis and display the results
        if analyze_button and text_input1 and text_input2:
            try:
                with st.spinner(discourse_t.get('processing', 'Procesando...')):
                    result = perform_discourse_analysis(
                        text_input1,
                        text_input2,
                        nlp_models[lang_code],
                        lang_code
                    )

                    if result['success']:
                        # Convert both matplotlib figures to PNG bytes so they can
                        # be stored/re-rendered, then close them to free memory.
                        for graph_key in ['graph1', 'graph2']:
                            if graph_key in result and result[graph_key] is not None:
                                bytes_key = f'{graph_key}_bytes'
                                graph_bytes = fig_to_bytes(result[graph_key])
                                if graph_bytes:
                                    result[bytes_key] = graph_bytes
                                    # NOTE(review): close appears nested under the
                                    # success branch in the original render — confirm
                                    # figures are also closed when conversion fails.
                                    plt.close(result[graph_key])

                        st.session_state.discourse_live_state['last_result'] = result
                        st.session_state.discourse_live_state['analysis_count'] += 1

                        # Persist the analysis for the logged-in user.
                        store_student_discourse_result(
                            st.session_state.username,
                            text_input1,
                            text_input2,
                            result
                        )

                        # Show results
                        st.markdown("---")
                        st.subheader(discourse_t.get('results_title', 'Resultados del Análisis'))
                        display_discourse_results(result, lang_code, discourse_t)

                    else:
                        st.error(result.get('message', 'Error en el análisis'))

            except Exception as e:
                logger.error(f"Error en análisis: {str(e)}")
                st.error(discourse_t.get('error_processing', f'Error al procesar el texto: {str(e)}'))

        # Show previous results if they exist (no new analysis requested)
        elif 'last_result' in st.session_state.discourse_live_state and \
             st.session_state.discourse_live_state['last_result'] is not None:

            st.markdown("---")
            st.subheader(discourse_t.get('previous_results', 'Resultados del Análisis Anterior'))
            display_discourse_results(
                st.session_state.discourse_live_state['last_result'],
                lang_code,
                discourse_t
            )

    except Exception as e:
        logger.error(f"Error general en interfaz del discurso en vivo: {str(e)}")
        st.error(discourse_t.get('general_error', "Se produjo un error. Por favor, intente de nuevo."))
|
149 |
+
|
150 |
+
|
151 |
+
|
modules/discourse/discourse_process.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ..text_analysis.discourse_analysis import perform_discourse_analysis, compare_semantic_analysis
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
def process_discourse_input(text1, text2, nlp_models, lang_code):
    """
    Run the discourse-analysis pipeline over a pair of texts.

    Args:
        text1: Text of the first (pattern) document.
        text2: Text of the second (comparison) document.
        nlp_models: Mapping of language code -> spaCy model.
        lang_code: Current language code, used to pick the model.

    Returns:
        dict: {'success': True, 'analysis': <analysis dict>} on success,
              {'success': False, 'error': <message>} on failure.
    """
    # Local import: this module defines no top-level logger; the original
    # referenced an undefined `logger`, which raised NameError inside the
    # except handler and masked the real error.
    import logging
    logger = logging.getLogger(__name__)

    try:
        # Pick the language-specific model.
        nlp = nlp_models[lang_code]

        # Run the analysis.
        analysis_result = perform_discourse_analysis(text1, text2, nlp, lang_code)

        if analysis_result['success']:
            return {
                'success': True,
                'analysis': analysis_result
            }
        return {
            'success': False,
            'error': 'Error en el análisis del discurso'
        }

    except Exception as e:
        logger.error(f"Error en process_discourse_input: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }
|
39 |
+
|
40 |
+
def format_discourse_results(result):
    """
    Flatten an analysis result into the shape the UI layer expects.

    Args:
        result: dict produced by process_discourse_input; on success it holds
            the full analysis under the 'analysis' key.

    Returns:
        dict: {'success': True, 'graph1', 'graph2', 'key_concepts1',
              'key_concepts2', 'table1', 'table2'} on success; the input
              unchanged when it already signals failure; or
              {'success': False, 'error': <message>} when a key is missing.
    """
    # Local import: the original referenced an undefined `logger`, so any
    # KeyError here turned into a NameError instead of a clean error dict.
    import logging
    logger = logging.getLogger(__name__)

    try:
        # Failure results pass through untouched.
        if not result['success']:
            return result

        analysis = result['analysis']
        return {
            'success': True,
            'graph1': analysis['graph1'],
            'graph2': analysis['graph2'],
            'key_concepts1': analysis['key_concepts1'],
            'key_concepts2': analysis['key_concepts2'],
            'table1': analysis['table1'],
            'table2': analysis['table2']
        }

    except Exception as e:
        logger.error(f"Error en format_discourse_results: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }
|
modules/email/__init__.py
ADDED
File without changes
|
modules/email/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (147 Bytes). View file
|
|
modules/email/__pycache__/email.cpython-311.pyc
ADDED
Binary file (5.19 kB). View file
|
|
modules/email/email.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import smtplib
|
2 |
+
from email.mime.text import MIMEText
|
3 |
+
from email.mime.multipart import MIMEMultipart
|
4 |
+
import os
|
5 |
+
|
6 |
+
def send_email_notification(name, email, institution, role, reason):
    """Send the internal notification email for a new trial request.

    Args:
        name: Applicant name.
        email: Applicant email address.
        institution: Applicant's institution.
        role: Applicant's role.
        reason: Free-text reason for requesting access.

    Returns:
        bool: True when the message was sent, False on any failure.
    """
    # Local import: this module defines no logger; the original referenced an
    # undefined `logger`, so both the success and failure paths raised
    # NameError instead of returning True/False.
    import logging
    logger = logging.getLogger(__name__)

    sender_email = "[email protected]"  # configure with your sending address
    receiver_email = "[email protected]"
    # Credentials must come from the environment, never be hard-coded.
    password = os.environ.get("NOREPLY_EMAIL_PASSWORD")

    message = MIMEMultipart("alternative")
    message["Subject"] = "Nueva solicitud de prueba de AIdeaText"
    message["From"] = sender_email
    message["To"] = receiver_email

    text = f"""\
Nueva solicitud de prueba de AIdeaText:
Nombre: {name}
Email: {email}
Institución: {institution}
Rol: {role}
Razón: {reason}
"""

    html = f"""\
<html>
<body>
<h2>Nueva solicitud de prueba de AIdeaText</h2>
<p><strong>Nombre:</strong> {name}</p>
<p><strong>Email:</strong> {email}</p>
<p><strong>Institución:</strong> {institution}</p>
<p><strong>Rol:</strong> {role}</p>
<p><strong>Razón:</strong> {reason}</p>
</body>
</html>
"""

    # Plain-text part first so MIME-aware clients prefer the HTML part.
    part1 = MIMEText(text, "plain")
    part2 = MIMEText(html, "html")
    message.attach(part1)
    message.attach(part2)

    try:
        with smtplib.SMTP_SSL("smtp.titan.email", 465) as server:
            logger.info("Conectado al servidor SMTP")
            server.login(sender_email, password)
            logger.info("Inicio de sesión exitoso")
            server.sendmail(sender_email, receiver_email, message.as_string())
            logger.info(f"Correo enviado de {sender_email} a {receiver_email}")
            logger.info(f"Email notification sent for application request: {email}")
            return True
    except Exception as e:
        logger.error(f"Error sending email notification: {str(e)}")
        return False
|
56 |
+
|
57 |
+
def send_user_feedback_notification(name, email, feedback):
    """Send the internal notification email for a piece of user feedback.

    Args:
        name: Name of the user leaving feedback.
        email: Email address of the user.
        feedback: Free-text feedback body.

    Returns:
        bool: True when the message was sent, False on any failure.
    """
    # Local import: the original referenced an undefined `logger`, so this
    # function raised NameError instead of ever returning True/False.
    import logging
    logger = logging.getLogger(__name__)

    sender_email = "[email protected]"
    receiver_email = "[email protected]"  # change to the desired destination
    password = os.environ.get("NOREPLY_EMAIL_PASSWORD")

    message = MIMEMultipart("alternative")
    message["Subject"] = "Nuevo comentario de usuario en AIdeaText"
    message["From"] = sender_email
    message["To"] = receiver_email

    html = f"""\
<html>
<body>
<h2>Nuevo comentario de usuario en AIdeaText</h2>
<p><strong>Nombre:</strong> {name}</p>
<p><strong>Email:</strong> {email}</p>
<p><strong>Comentario:</strong> {feedback}</p>
</body>
</html>
"""

    part = MIMEText(html, "html")
    message.attach(part)

    try:
        with smtplib.SMTP_SSL("smtp.titan.email", 465) as server:
            logger.info("Conectado al servidor SMTP")
            server.login(sender_email, password)
            logger.info("Inicio de sesión exitoso")
            server.sendmail(sender_email, receiver_email, message.as_string())
            logger.info(f"Correo enviado de {sender_email} a {receiver_email}")
            logger.info(f"Email notification sent for user feedback from: {email}")
            return True
    except Exception as e:
        logger.error(f"Error sending user feedback email notification: {str(e)}")
        return False
|
modules/email/txt.txt
ADDED
File without changes
|
modules/morphosyntax/__init__.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Public API of the morphosyntax package: re-export the UI entry points and
# the processing helpers so callers can import them from the package root.
from .morphosyntax_interface import (
    display_morphosyntax_interface,
    display_arc_diagram
    # display_morphosyntax_results
)

from .morphosyntax_process import (
    process_morphosyntactic_input,
    format_analysis_results,
    perform_advanced_morphosyntactic_analysis,
    get_repeated_words_colors,
    highlight_repeated_words,
    POS_COLORS,
    POS_TRANSLATIONS
)

# Names exported via `from modules.morphosyntax import *`.
__all__ = [
    'display_morphosyntax_interface',
    'display_arc_diagram',
    #'display_morphosyntax_results',
    'process_morphosyntactic_input',
    'format_analysis_results',
    'perform_advanced_morphosyntactic_analysis',
    'get_repeated_words_colors',
    'highlight_repeated_words',
    'POS_COLORS',
    'POS_TRANSLATIONS'
]
|
29 |
+
|
modules/morphosyntax/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (154 Bytes). View file
|
|
modules/morphosyntax/__pycache__/morphosyntax_interface.cpython-311.pyc
ADDED
Binary file (4.95 kB). View file
|
|
modules/morphosyntax/__pycache__/morphosyntax_process.cpython-311.pyc
ADDED
Binary file (1.8 kB). View file
|
|
modules/morphosyntax/morphosyntax_interface-Back1910-25-9-24.py
ADDED
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_interface.py
|
2 |
+
import streamlit as st
|
3 |
+
from streamlit_float import *
|
4 |
+
from streamlit_antd_components import *
|
5 |
+
from streamlit.components.v1 import html
|
6 |
+
import base64
|
7 |
+
from .morphosyntax_process import process_morphosyntactic_input
|
8 |
+
from ..chatbot.chatbot import initialize_chatbot
|
9 |
+
from ..utils.widget_utils import generate_unique_key
|
10 |
+
from ..database.database_oldFromV2 import store_morphosyntax_result
|
11 |
+
|
12 |
+
import logging
|
13 |
+
logger = logging.getLogger(__name__)
|
14 |
+
|
15 |
+
|
16 |
+
####################### VERSION ANTERIOR A LAS 20:00 24-9-24
|
17 |
+
|
18 |
+
def display_morphosyntax_interface(lang_code, nlp_models, t):
    """Chat-style morphosyntactic analysis UI (legacy backup version).

    Renders a styled intro banner, a chat transcript with embedded HTML
    visualizations, and a chat input whose text is processed by
    process_morphosyntactic_input; analysis results are persisted with
    store_morphosyntax_result.
    """
    # Custom CSS for the intro banner
    st.markdown("""
        <style>
        .morpho-initial-message {
            background-color: #f0f2f6;
            border-left: 5px solid #4CAF50;
            padding: 10px;
            border-radius: 5px;
            font-size: 16px;
            margin-bottom: 20px;
        }
        </style>
    """, unsafe_allow_html=True)

    # Show the initial message as a styled paragraph
    st.markdown(f"""
        <div class="morpho-initial-message">
            {t['morpho_initial_message']}
        </div>
    """, unsafe_allow_html=True)

    # Initialize the chatbot on first use
    if 'morphosyntax_chatbot' not in st.session_state:
        st.session_state.morphosyntax_chatbot = initialize_chatbot('morphosyntactic')

    # Container holding the chat transcript
    chat_container = st.container()

    # Render the chat history (messages plus any stored visualizations)
    with chat_container:
        if 'morphosyntax_chat_history' not in st.session_state:
            st.session_state.morphosyntax_chat_history = []
        for i, message in enumerate(st.session_state.morphosyntax_chat_history):
            with st.chat_message(message["role"]):
                st.write(message["content"])
                if "visualizations" in message:
                    for viz in message["visualizations"]:
                        # Horizontal-scroll wrapper so wide arc diagrams fit
                        st.components.v1.html(
                            f"""
                            <div style="width: 100%; overflow-x: auto; white-space: nowrap;">
                                <div style="min-width: 1200px;">
                                    {viz}
                                </div>
                            </div>
                            """,
                            height=370,
                            scrolling=True
                        )

    # User input
    user_input = st.chat_input(
        t['morpho_input_label'],
        key=generate_unique_key('morphosyntax', "chat_input")
    )

    if user_input:
        # Append the user message to the history
        st.session_state.morphosyntax_chat_history.append({"role": "user", "content": user_input})

        # Show a progress indicator while processing
        with st.spinner(t.get('processing', 'Processing...')):
            try:
                # Process the user input
                response, visualizations, result = process_morphosyntactic_input(user_input, lang_code, nlp_models, t)

                # Append the assistant response to the history
                message = {
                    "role": "assistant",
                    "content": response
                }
                if visualizations:
                    message["visualizations"] = visualizations
                st.session_state.morphosyntax_chat_history.append(message)

                # Render the most recent response
                with st.chat_message("assistant"):
                    st.write(response)
                    if visualizations:
                        for i, viz in enumerate(visualizations):
                            st.components.v1.html(
                                f"""
                                <div style="width: 100%; overflow-x: auto; white-space: nowrap;">
                                    <div style="min-width: 1200px;">
                                        {viz}
                                    </div>
                                </div>
                                """,
                                height=350,
                                scrolling=True
                            )

                # If this was an analysis command, persist it in the database
                if user_input.startswith('/analisis_morfosintactico') and result:
                    store_morphosyntax_result(
                        st.session_state.username,
                        user_input.split('[', 1)[1].rsplit(']', 1)[0],  # analyzed text
                        result.get('repeated_words', {}),
                        visualizations,
                        result.get('pos_analysis', []),
                        result.get('morphological_analysis', []),
                        result.get('sentence_structure', [])
                    )

            except Exception as e:
                st.error(f"{t['error_processing']}: {str(e)}")

        # Duplicated persistence block kept from the original.
        # NOTE(review): `result` and `visualizations` may be unbound here if
        # the try block above failed before assignment — confirm this path.
        if user_input.startswith('/analisis_morfosintactico') and result:
            store_morphosyntax_result(
                st.session_state.username,
                user_input.split('[', 1)[1].rsplit(']', 1)[0],  # analyzed text
                result['repeated_words'],
                visualizations,  # now pass every visualization
                result['pos_analysis'],
                result['morphological_analysis'],
                result['sentence_structure']
            )

        # Force the interface to refresh
        st.rerun()

    # Button to clear the chat history
    if st.button(t['clear_chat'], key=generate_unique_key('morphosyntax', 'clear_chat')):
        st.session_state.morphosyntax_chat_history = []
        st.rerun()


# Disabled debug/test variant kept as a dead string literal in the original.
'''
############ MODULO PARA DEPURACIÓN Y PRUEBAS #####################################################
def display_morphosyntax_interface(lang_code, nlp_models, t):
    st.subheader(t['morpho_title'])

    text_input = st.text_area(
        t['warning_message'],
        height=150,
        key=generate_unique_key("morphosyntax", "text_area")
    )

    if st.button(
        t['results_title'],
        key=generate_unique_key("morphosyntax", "analyze_button")
    ):
        if text_input:
            # Aquí iría tu lógica de análisis morfosintáctico
            # Por ahora, solo mostraremos un mensaje de placeholder
            st.info(t['analysis_placeholder'])
        else:
            st.warning(t['no_text_warning'])
###
#################################################
'''
|
modules/morphosyntax/morphosyntax_interface-BackUp_Dec24_OK.py
ADDED
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_interface.py
|
2 |
+
import streamlit as st
|
3 |
+
from streamlit_float import *
|
4 |
+
from streamlit_antd_components import *
|
5 |
+
from streamlit.components.v1 import html
|
6 |
+
import spacy
|
7 |
+
from spacy import displacy
|
8 |
+
import spacy_streamlit
|
9 |
+
import pandas as pd
|
10 |
+
import base64
|
11 |
+
import re
|
12 |
+
|
13 |
+
# Importar desde morphosyntax_process.py
|
14 |
+
from .morphosyntax_process import (
|
15 |
+
process_morphosyntactic_input,
|
16 |
+
format_analysis_results,
|
17 |
+
perform_advanced_morphosyntactic_analysis, # Añadir esta importación
|
18 |
+
get_repeated_words_colors, # Y estas también
|
19 |
+
highlight_repeated_words,
|
20 |
+
POS_COLORS,
|
21 |
+
POS_TRANSLATIONS
|
22 |
+
)
|
23 |
+
|
24 |
+
from ..utils.widget_utils import generate_unique_key
|
25 |
+
|
26 |
+
from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
|
27 |
+
from ..database.chat_mongo_db import store_chat_history, get_chat_history
|
28 |
+
|
29 |
+
# from ..database.morphosintaxis_export import export_user_interactions
|
30 |
+
|
31 |
+
import logging
|
32 |
+
logger = logging.getLogger(__name__)
|
33 |
+
|
34 |
+
############################################################################################################
|
35 |
+
def display_morphosyntax_interface(lang_code, nlp_models, morpho_t):
    """Form-style morphosyntactic analysis UI (Dec-24 backup version).

    Shows a text area and an analyze button; on submit it runs the spaCy
    model plus the advanced analysis, stores the result in session state and
    in MongoDB, and renders it via display_morphosyntax_results.
    """
    try:
        # 1. Initialize the morphosyntax state if it does not exist
        if 'morphosyntax_state' not in st.session_state:
            st.session_state.morphosyntax_state = {
                'input_text': "",
                'analysis_count': 0,
                'last_analysis': None
            }

        # 2. Text-input key derived from the analysis counter so each
        #    completed analysis produces a fresh widget instance.
        input_key = f"morpho_input_{st.session_state.morphosyntax_state['analysis_count']}"

        sentence_input = st.text_area(
            morpho_t.get('morpho_input_label', 'Enter text to analyze'),
            height=150,
            placeholder=morpho_t.get('morpho_input_placeholder', 'Enter your text here...'),
            key=input_key
        )

        # 3. Keep the current text in session state
        st.session_state.morphosyntax_state['input_text'] = sentence_input

        # 4. Columns for the button
        col1, col2, col3 = st.columns([2,1,2])

        # 5. Analyze button in the first column
        with col1:
            analyze_button = st.button(
                morpho_t.get('morpho_analyze_button', 'Analyze Morphosyntax'),
                key=f"morpho_button_{st.session_state.morphosyntax_state['analysis_count']}",
                type="primary",  # new in Streamlit 1.39.0
                icon="🔍",  # new in Streamlit 1.39.0
                disabled=not bool(sentence_input.strip()),  # enabled only when there is text
                use_container_width=True
            )

        # 6. Analysis logic
        if analyze_button and sentence_input.strip():  # require non-whitespace text
            try:
                with st.spinner(morpho_t.get('processing', 'Processing...')):
                    # Run the language-specific model over the text
                    doc = nlp_models[lang_code](sentence_input)

                    # Run the advanced morphosyntactic analysis with the same model
                    advanced_analysis = perform_advanced_morphosyntactic_analysis(
                        sentence_input,
                        nlp_models[lang_code]
                    )

                    # Save the result in session state
                    st.session_state.morphosyntax_result = {
                        'doc': doc,
                        'advanced_analysis': advanced_analysis
                    }

                    # Increment the analysis counter
                    st.session_state.morphosyntax_state['analysis_count'] += 1

                    # Persist the analysis in the database
                    if store_student_morphosyntax_result(
                        username=st.session_state.username,
                        text=sentence_input,
                        arc_diagrams=advanced_analysis['arc_diagrams']
                    ):
                        st.success(morpho_t.get('success_message', 'Analysis saved successfully'))

                        # Show the results
                        display_morphosyntax_results(
                            st.session_state.morphosyntax_result,
                            lang_code,
                            morpho_t
                        )
                    else:
                        st.error(morpho_t.get('error_message', 'Error saving analysis'))

            except Exception as e:
                logger.error(f"Error en análisis morfosintáctico: {str(e)}")
                st.error(morpho_t.get('error_processing', f'Error processing text: {str(e)}'))

        # 7. Show previous results if they exist
        elif 'morphosyntax_result' in st.session_state and st.session_state.morphosyntax_result is not None:
            display_morphosyntax_results(
                st.session_state.morphosyntax_result,
                lang_code,
                morpho_t
            )
        elif not sentence_input.strip():
            st.info(morpho_t.get('morpho_initial_message', 'Enter text to begin analysis'))

    except Exception as e:
        logger.error(f"Error general en display_morphosyntax_interface: {str(e)}")
        st.error("Se produjo un error. Por favor, intente de nuevo.")
        st.error(f"Detalles del error: {str(e)}")  # added for easier debugging
|
129 |
+
|
130 |
+
############################################################################################################
|
131 |
+
def display_morphosyntax_results(result, lang_code, morpho_t):
    """
    Render the results of a morphosyntactic analysis.

    Args:
        result: dict with 'doc' (spaCy Doc) and 'advanced_analysis' as built
            by display_morphosyntax_interface.
        lang_code: language code ('es', 'en' or 'fr') used to pick the
            translation tables below.
        morpho_t: translation dict for UI strings (uses .get fallbacks).
    """
    # morpho_t = t.get('MORPHOSYNTACTIC', {})

    if result is None:
        st.warning(morpho_t.get('no_results', 'No results available'))
        return

    doc = result['doc']
    advanced_analysis = result['advanced_analysis']

    # Show the color legend for grammatical categories
    st.markdown(f"##### {morpho_t.get('legend', 'Legend: Grammatical categories')}")
    legend_html = "<div style='display: flex; flex-wrap: wrap;'>"
    for pos, color in POS_COLORS.items():
        if pos in POS_TRANSLATIONS[lang_code]:
            legend_html += f"<div style='margin-right: 10px;'><span style='background-color: {color}; padding: 2px 5px;'>{POS_TRANSLATIONS[lang_code][pos]}</span></div>"
    legend_html += "</div>"
    st.markdown(legend_html, unsafe_allow_html=True)

    # Show the repeated-words analysis
    word_colors = get_repeated_words_colors(doc)
    with st.expander(morpho_t.get('repeated_words', 'Repeated words'), expanded=True):
        highlighted_text = highlight_repeated_words(doc, word_colors)
        st.markdown(highlighted_text, unsafe_allow_html=True)

    # Show the sentence structure
    with st.expander(morpho_t.get('sentence_structure', 'Sentence structure'), expanded=True):
        for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']):
            sentence_str = (
                f"**{morpho_t.get('sentence', 'Sentence')} {i+1}** "
                f"{morpho_t.get('root', 'Root')}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- "
                f"{morpho_t.get('subjects', 'Subjects')}: {', '.join(sent_analysis['subjects'])} -- "
                f"{morpho_t.get('objects', 'Objects')}: {', '.join(sent_analysis['objects'])} -- "
                f"{morpho_t.get('verbs', 'Verbs')}: {', '.join(sent_analysis['verbs'])}"
            )
            st.markdown(sentence_str)

    # Show POS analysis (left) and morphological analysis (right)
    col1, col2 = st.columns(2)

    with col1:
        with st.expander(morpho_t.get('pos_analysis', 'Part of speech'), expanded=True):
            pos_df = pd.DataFrame(advanced_analysis['pos_analysis'])

            # Translate the POS tags into the selected language
            pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # Rename the columns for clarity
            pos_df = pos_df.rename(columns={
                'pos': morpho_t.get('grammatical_category', 'Grammatical category'),
                'count': morpho_t.get('count', 'Count'),
                'percentage': morpho_t.get('percentage', 'Percentage'),
                'examples': morpho_t.get('examples', 'Examples')
            })

            # Show the dataframe
            st.dataframe(pos_df)

    with col2:
        with st.expander(morpho_t.get('morphological_analysis', 'Morphological Analysis'), expanded=True):
            # 1. Build the initial DataFrame
            morph_df = pd.DataFrame(advanced_analysis['morphological_analysis'])

            # 2. First rename the columns using the interface translations
            column_mapping = {
                'text': morpho_t.get('word', 'Word'),
                'lemma': morpho_t.get('lemma', 'Lemma'),
                'pos': morpho_t.get('grammatical_category', 'Grammatical category'),
                'dep': morpho_t.get('dependency', 'Dependency'),
                'morph': morpho_t.get('morphology', 'Morphology')
            }

            # 3. Apply the renaming
            morph_df = morph_df.rename(columns=column_mapping)

            # 4. Translate the grammatical categories with the global POS_TRANSLATIONS
            grammatical_category = morpho_t.get('grammatical_category', 'Grammatical category')
            morph_df[grammatical_category] = morph_df[grammatical_category].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # Translate dependency labels with per-language tables
            dep_translations = {

                'es': {
                    'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto',
                    'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto',
                    'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado',
                    'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso',
                    'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal',
                    'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva',
                    'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador',
                    'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo',
                    'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis',
                    'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación'
                },

                'en': {
                    'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object',
                    'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement',
                    'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 'adverbial clause modifier',
                    'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker',
                    'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun',
                    'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking',
                    'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression',
                    'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan',
                    'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation'
                },

                'fr': {
                    'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect',
                    'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique',
                    'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial',
                    'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal',
                    'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant',
                    'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée',
                    'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin',
                    'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation'
                }
            }

            dependency = morpho_t.get('dependency', 'Dependency')
            morph_df[dependency] = morph_df[dependency].map(lambda x: dep_translations[lang_code].get(x, x))

            # Per-language translations of morphological feature names/values.
            # NOTE(review): 'Imp' is listed twice in each table (Imperative and
            # Imperfect) — the later value wins; confirm intended mapping.
            morph_translations = {
                'es': {
                    'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido',
                    'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo',
                    'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz',
                    'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural',
                    'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo',
                    'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado',
                    'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto'
                },

                'en': {
                    'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person',
                    'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice',
                    'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative',
                    'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle',
                    'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect'
                },

                'fr': {
                    'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom',
                    'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix',
                    'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif',
                    'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe',
                    'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait'
                }
            }

            def translate_morph(morph_string, lang_code):
                # Substring replacement of each feature token in the morph string.
                for key, value in morph_translations[lang_code].items():
                    morph_string = morph_string.replace(key, value)
                return morph_string

            morphology = morpho_t.get('morphology', 'Morphology')
            morph_df[morphology] = morph_df[morphology].apply(lambda x: translate_morph(x, lang_code))

            st.dataframe(morph_df)

    # Show the dependency arc diagrams
    with st.expander(morpho_t.get('arc_diagram', 'Syntactic analysis: Arc diagram'), expanded=True):
        sentences = list(doc.sents)
        arc_diagrams = []

        for i, sent in enumerate(sentences):
            st.subheader(f"{morpho_t.get('sentence', 'Sentence')} {i+1}")
            # Render the displaCy SVG and shrink its height via regex surgery.
            html = displacy.render(sent, style="dep", options={"distance": 100})
            html = html.replace('height="375"', 'height="200"')
            html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)
            html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"',
                          lambda m: f'<g transform="translate({m.group(1)},50)"', html)
            st.write(html, unsafe_allow_html=True)
            arc_diagrams.append(html)

    # Export button (disabled in the original)
    # if st.button(morpho_t.get('export_button', 'Export Analysis')):
    #     pdf_buffer = export_user_interactions(st.session_state.username, 'morphosyntax')
    #     st.download_button(
    #         label=morpho_t.get('download_pdf', 'Download PDF'),
    #         data=pdf_buffer,
    #         file_name="morphosyntax_analysis.pdf",
    #         mime="application/pdf"
    #     )
|
modules/morphosyntax/morphosyntax_interface.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/morphosyntax/morphosyntax_interface.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
import re
|
5 |
+
import logging
|
6 |
+
from spacy import displacy
|
7 |
+
|
8 |
+
# Funciones de análisis y DB que ya tienes en tus módulos
|
9 |
+
from ..morphosyntax.morphosyntax_process import perform_advanced_morphosyntactic_analysis
|
10 |
+
from ..database.morphosyntax_iterative_mongo_db import (
|
11 |
+
store_student_morphosyntax_base,
|
12 |
+
store_student_morphosyntax_iteration,
|
13 |
+
)
|
14 |
+
|
15 |
+
logger = logging.getLogger(__name__)
|
16 |
+
|
17 |
+
###########################################################################
|
18 |
+
def initialize_arc_analysis_state():
    """Create the arc-analysis session state (base text + iteration) on first use.

    Does nothing if the state already exists, so it is safe to call on
    every Streamlit rerun.
    """
    if "arc_analysis_state" in st.session_state:
        return
    st.session_state.arc_analysis_state = dict(
        base_id=None,
        base_text="",
        base_diagram=None,
        iteration_text="",
        iteration_diagram=None,
    )
    logger.info("Estado de análisis de arcos inicializado.")
|
29 |
+
|
30 |
+
###########################################################################
|
31 |
+
def reset_arc_analysis_state():
    """Discard any existing arc analysis and return the state to pristine."""
    fresh_state = {
        "base_id": None,
        "base_text": "",
        "base_diagram": None,
        "iteration_text": "",
        "iteration_diagram": None,
    }
    st.session_state.arc_analysis_state = fresh_state
    logger.info("Estado de arcos reseteado.")
|
41 |
+
|
42 |
+
###########################################################################
|
43 |
+
def display_arc_diagram(doc):
    """
    Build the dependency arc-diagram HTML for a spaCy ``Doc``.

    Returns the HTML (one ``arc-diagram-container`` div per sentence)
    rather than printing it; callers render it with ``st.write(...)``.
    On any rendering failure an error paragraph is returned instead.
    """
    render_options = {"distance": 100, "arrow_spacing": 20, "word_spacing": 30}
    try:
        pieces = []
        for sentence in doc.sents:
            svg = displacy.render(sentence, style="dep", options=render_options)
            # Shrink displaCy's default SVG heights so diagrams stack compactly.
            svg = svg.replace('height="375"', 'height="200"')
            svg = re.sub(
                r'<svg[^>]*>',
                lambda m: m.group(0).replace('height="450"', 'height="300"'),
                svg,
            )
            # Pull the drawing group upward so it fits the reduced canvas.
            svg = re.sub(
                r'<g [^>]*transform="translate\((\d+),(\d+)\)"',
                lambda m: f'<g transform="translate({m.group(1)},50)"',
                svg,
            )
            pieces.append(f'<div class="arc-diagram-container">{svg}</div>')
        return "".join(pieces)

    except Exception as e:
        logger.error(f"Error en display_arc_diagram: {str(e)}")
        return "<p style='color:red;'>Error generando diagrama</p>"
|
79 |
+
|
80 |
+
###########################################################################
|
81 |
+
def display_morphosyntax_interface(lang_code, nlp_models, morpho_t):
    """
    Main interface for arc-diagram visualization (Base Text vs. Iterations).

    Args:
        lang_code: key into ``nlp_models`` selecting the spaCy pipeline.
        nlp_models: mapping of language code -> loaded spaCy model.
        morpho_t: translation dict; UI labels in this function are
            hard-coded Spanish, the parameter is kept for signature
            consistency with sibling interface modules.
    """
    # CSS for a stable vertical layout
    st.markdown("""
        <style>
        .stTextArea textarea {
            font-size: 1rem;
            line-height: 1.5;
            min-height: 100px !important;
            height: 100px !important;
        }
        .arc-diagram-container {
            width: 100%;
            padding: 0.5rem;
            margin: 0.5rem 0;
        }
        .divider {
            height: 3px;
            border: none;
            background-color: #333;
            margin: 2rem 0;
        }
        </style>
    """, unsafe_allow_html=True)

    # 1) Initialize session state (no-op after the first run)
    initialize_arc_analysis_state()
    arc_state = st.session_state.arc_analysis_state

    # 2) Create the tabs: "Texto Base" and "Iteraciones"
    tabs = st.tabs(["Texto Base", "Iteraciones"])

    # =================== TAB 1: Base text ==========================
    with tabs[0]:
        st.subheader("Análisis de Texto Base")

        # Button to start a fresh analysis
        if st.button("Nuevo Análisis", key="btn_reset_base"):
            # If a full page reload is ever required, uncomment:
            # st.experimental_rerun()
            reset_arc_analysis_state()

        # Base-text textarea (value persisted in session state)
        arc_state["base_text"] = st.text_area(
            "Ingrese su texto inicial",
            value=arc_state["base_text"],
            key="base_text_input",
            height=150
        )

        # Button to analyze the base text
        if st.button("Analizar Texto Base", key="btn_analyze_base"):
            if not arc_state["base_text"].strip():
                st.warning("Ingrese un texto para analizar.")
            else:
                try:
                    # Run spaCy and build the arc-diagram HTML
                    doc = nlp_models[lang_code](arc_state["base_text"])
                    base_arc_html = display_arc_diagram(doc)
                    arc_state["base_diagram"] = base_arc_html

                    # Persist the analysis to Mongo
                    analysis = perform_advanced_morphosyntactic_analysis(
                        arc_state["base_text"],
                        nlp_models[lang_code]
                    )
                    base_id = store_student_morphosyntax_base(
                        username=st.session_state.username,
                        text=arc_state["base_text"],
                        arc_diagrams=analysis["arc_diagrams"]
                    )
                    if base_id:
                        arc_state["base_id"] = base_id
                        st.success(f"Análisis base guardado. ID: {base_id}")

                except Exception as exc:
                    st.error("Error procesando texto base")
                    logger.error(f"Error en análisis base: {str(exc)}")

        # Show the base diagram (if one has been produced)
        if arc_state["base_diagram"]:
            st.markdown("<hr class='divider'>", unsafe_allow_html=True)
            st.markdown("#### Diagrama de Arco (Texto Base)")
            st.write(arc_state["base_diagram"], unsafe_allow_html=True)

    # ================== TAB 2: Iterations ==========================
    with tabs[1]:
        st.subheader("Análisis de Cambios / Iteraciones")

        # A stored base analysis is required before iterating
        if not arc_state["base_id"]:
            st.info("Primero analiza un texto base en la pestaña anterior.")
            return

        # --- 1) Always show the base diagram on top ---
        st.markdown("#### Diagrama de Arco (Texto Base)")
        if arc_state["base_diagram"]:
            st.write(arc_state["base_diagram"], unsafe_allow_html=True)
        else:
            st.info("No hay diagrama base disponible.")

        # --- 2) Text box for the iteration ---
        st.markdown("<hr class='divider'>", unsafe_allow_html=True)
        st.subheader("Texto de Iteración")
        arc_state["iteration_text"] = st.text_area(
            "Ingrese su nueva versión / iteración",
            value=arc_state["iteration_text"],
            height=150
        )

        # Button to analyze the iteration
        if st.button("Analizar Cambios", key="btn_analyze_iteration"):
            if not arc_state["iteration_text"].strip():
                st.warning("Ingrese texto de iteración.")
            else:
                try:
                    # Run spaCy on the iteration text
                    doc_iter = nlp_models[lang_code](arc_state["iteration_text"])
                    arc_html_iter = display_arc_diagram(doc_iter)
                    arc_state["iteration_diagram"] = arc_html_iter

                    # Persist the iteration, linked to the base analysis
                    analysis_iter = perform_advanced_morphosyntactic_analysis(
                        arc_state["iteration_text"],
                        nlp_models[lang_code]
                    )
                    iteration_id = store_student_morphosyntax_iteration(
                        username=st.session_state.username,
                        base_id=arc_state["base_id"],
                        original_text=arc_state["base_text"],
                        iteration_text=arc_state["iteration_text"],
                        arc_diagrams=analysis_iter["arc_diagrams"]
                    )
                    if iteration_id:
                        st.success(f"Iteración guardada. ID: {iteration_id}")

                except Exception as exc:
                    st.error("Error procesando iteración")
                    logger.error(f"Error en iteración: {str(exc)}")

        # --- 3) Show the iteration diagram below ---
        if arc_state["iteration_diagram"]:
            st.markdown("<hr class='divider'>", unsafe_allow_html=True)
            st.markdown("#### Diagrama de Arco (Iteración)")
            st.write(arc_state["iteration_diagram"], unsafe_allow_html=True)
|
modules/morphosyntax/morphosyntax_interface_BackUp_Dec-28-Ok.py
ADDED
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_interface.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
from streamlit_float import *
|
5 |
+
from streamlit_antd_components import *
|
6 |
+
from streamlit.components.v1 import html
|
7 |
+
import spacy
|
8 |
+
from spacy import displacy
|
9 |
+
import spacy_streamlit
|
10 |
+
import pandas as pd
|
11 |
+
import base64
|
12 |
+
import re
|
13 |
+
|
14 |
+
from .morphosyntax_process import (
|
15 |
+
process_morphosyntactic_input,
|
16 |
+
format_analysis_results,
|
17 |
+
perform_advanced_morphosyntactic_analysis,
|
18 |
+
get_repeated_words_colors,
|
19 |
+
highlight_repeated_words,
|
20 |
+
POS_COLORS,
|
21 |
+
POS_TRANSLATIONS
|
22 |
+
)
|
23 |
+
|
24 |
+
from ..utils.widget_utils import generate_unique_key
|
25 |
+
from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
|
26 |
+
from ..database.chat_mongo_db import store_chat_history, get_chat_history
|
27 |
+
|
28 |
+
import logging
|
29 |
+
logger = logging.getLogger(__name__)
|
30 |
+
|
31 |
+
|
32 |
+
def display_morphosyntax_interface(lang_code, nlp_models, morpho_t):
    """
    Render the morphosyntactic-analysis UI: persistent text area, analyze
    button, and (on demand) the arc-diagram results.

    Args:
        lang_code: key into ``nlp_models`` selecting the spaCy pipeline.
        nlp_models: mapping of language code -> loaded spaCy model.
        morpho_t: translation dict used for all UI labels.
    """
    try:
        # Initialize widget/session state on the first run
        if 'morphosyntax_state' not in st.session_state:
            st.session_state.morphosyntax_state = {
                'analysis_count': 0,
                'current_text': '',  # store the current text across reruns
                'last_analysis': None,
                'needs_update': False  # flag: results need refreshing
            }

        # Text-input widget key; fixed so the value survives reruns
        text_key = "morpho_text_input"

        # Callback fired when the text area changes
        def on_text_change():
            st.session_state.morphosyntax_state['current_text'] = st.session_state[text_key]
            st.session_state.morphosyntax_state['needs_update'] = True

        # Recover previously entered text, if any
        default_text = st.session_state.morphosyntax_state.get('current_text', '')

        sentence_input = st.text_area(
            morpho_t.get('morpho_input_label', 'Enter text to analyze'),
            value=default_text,  # use the saved text
            height=150,
            key=text_key,
            on_change=on_text_change,
            placeholder=morpho_t.get('morpho_input_placeholder', 'Enter your text here...')
        )

        # Analyze button
        # NOTE(review): col2 and col3 are unused — the 3-column split is
        # layout spacing only.
        col1, col2, col3 = st.columns([2,1,2])
        with col1:
            analyze_button = st.button(
                morpho_t.get('morpho_analyze_button', 'Analyze Morphosyntax'),
                key=f"morpho_button_{st.session_state.morphosyntax_state['analysis_count']}",
                type="primary",
                icon="🔍",
                disabled=not bool(sentence_input.strip()),
                use_container_width=True
            )

        # Run the analysis only when needed (button click or pending update)
        if (analyze_button or st.session_state.morphosyntax_state['needs_update']) and sentence_input.strip():
            try:
                with st.spinner(morpho_t.get('processing', 'Processing...')):
                    doc = nlp_models[lang_code](sentence_input)
                    advanced_analysis = perform_advanced_morphosyntactic_analysis(
                        sentence_input,
                        nlp_models[lang_code]
                    )

                    st.session_state.morphosyntax_result = {
                        'doc': doc,
                        'advanced_analysis': advanced_analysis
                    }

                    # Persist to the DB only on an explicit button click
                    if analyze_button:
                        if store_student_morphosyntax_result(
                            username=st.session_state.username,
                            text=sentence_input,
                            arc_diagrams=advanced_analysis['arc_diagrams']
                        ):
                            st.success(morpho_t.get('success_message', 'Analysis saved successfully'))
                            st.session_state.morphosyntax_state['analysis_count'] += 1

                    st.session_state.morphosyntax_state['needs_update'] = False

                    # Show results in a dedicated container
                    with st.container():
                        display_morphosyntax_results(
                            st.session_state.morphosyntax_result,
                            lang_code,
                            morpho_t
                        )

            except Exception as e:
                logger.error(f"Error en análisis morfosintáctico: {str(e)}")
                st.error(morpho_t.get('error_processing', f'Error processing text: {str(e)}'))

        # Otherwise, show previous results if they exist
        elif 'morphosyntax_result' in st.session_state and st.session_state.morphosyntax_result:
            with st.container():
                display_morphosyntax_results(
                    st.session_state.morphosyntax_result,
                    lang_code,
                    morpho_t
                )

    except Exception as e:
        logger.error(f"Error general en display_morphosyntax_interface: {str(e)}")
        st.error("Se produjo un error. Por favor, intente de nuevo.")
|
126 |
+
|
127 |
+
|
128 |
+
|
129 |
+
def display_morphosyntax_results(result, lang_code, morpho_t):
    """
    Show only the syntactic analysis as per-sentence arc diagrams.

    ``result`` is the dict stored in session state ({'doc': ..., ...});
    ``lang_code`` is accepted for interface consistency but not used here.
    """
    if result is None:
        st.warning(morpho_t.get('no_results', 'No results available'))
        return

    doc = result['doc']

    # Section header for the arc diagrams
    st.markdown(f"### {morpho_t.get('arc_diagram', 'Syntactic analysis: Arc diagram')}")

    render_options = {
        "distance": 100,
        "arrow_spacing": 20,
        "word_spacing": 30
    }

    with st.container():
        for idx, sentence in enumerate(list(doc.sents)):
            with st.container():
                st.subheader(f"{morpho_t.get('sentence', 'Sentence')} {idx+1}")
                try:
                    svg = displacy.render(sentence, style="dep", options=render_options)
                    # Shrink the default SVG dimensions
                    svg = svg.replace('height="375"', 'height="200"')
                    svg = re.sub(
                        r'<svg[^>]*>',
                        lambda m: m.group(0).replace('height="450"', 'height="300"'),
                        svg
                    )
                    svg = re.sub(
                        r'<g [^>]*transform="translate\((\d+),(\d+)\)"',
                        lambda m: f'<g transform="translate({m.group(1)},50)"',
                        svg
                    )
                    # Wrap in a styled container div and render
                    st.write(
                        f'<div class="arc-diagram-container">{svg}</div>',
                        unsafe_allow_html=True
                    )
                except Exception as e:
                    logger.error(f"Error rendering sentence {idx}: {str(e)}")
                    st.error(f"Error displaying diagram for sentence {idx+1}")
|
modules/morphosyntax/morphosyntax_interface_vOk-30-12-24.py
ADDED
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/morphosyntax/morphosyntax_interface.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
import re
|
5 |
+
import logging
|
6 |
+
from spacy import displacy
|
7 |
+
|
8 |
+
# Se asume que la función perform_advanced_morphosyntactic_analysis
|
9 |
+
# y los métodos store_student_morphosyntax_base/iteration existen.
|
10 |
+
from ..morphosyntax.morphosyntax_process import perform_advanced_morphosyntactic_analysis
|
11 |
+
from ..database.morphosyntax_iterative_mongo_db import (
|
12 |
+
store_student_morphosyntax_base,
|
13 |
+
store_student_morphosyntax_iteration,
|
14 |
+
)
|
15 |
+
|
16 |
+
logger = logging.getLogger(__name__)
|
17 |
+
|
18 |
+
###########################################################################
|
19 |
+
def initialize_arc_analysis_state():
    """
    Initialize the arc-analysis state (base and iterations) if it does not
    already exist in the Streamlit session; safe to call on every rerun.
    """
    if "arc_analysis_state" not in st.session_state:
        st.session_state.arc_analysis_state = {
            "base_id": None,          # Mongo id of the stored base analysis
            "base_text": "",
            "base_diagram": None,     # cached arc-diagram HTML for the base
            "iteration_text": "",
            "iteration_diagram": None,
        }
        logger.info("Estado de análisis de arcos inicializado.")
|
32 |
+
|
33 |
+
###########################################################################
|
34 |
+
def reset_arc_analysis_state():
    """
    Completely reset the arc-analysis state, discarding the stored base
    analysis and any iteration in progress.
    """
    st.session_state.arc_analysis_state = {
        "base_id": None,
        "base_text": "",
        "base_diagram": None,
        "iteration_text": "",
        "iteration_diagram": None,
    }
    logger.info("Estado de arcos reseteado.")
|
46 |
+
|
47 |
+
###########################################################################
|
48 |
+
def display_arc_diagram(doc):
    """
    Generate and return the arc-diagram HTML for a spaCy ``Doc``.

    Does not print directly to the page; returns the HTML for use
    with ``st.write(..., unsafe_allow_html=True)``. On failure, an
    error paragraph is returned instead of raising.
    """
    try:
        diagram_html = ""
        for sent in doc.sents:
            svg_html = displacy.render(
                sent,
                style="dep",
                options={
                    "distance": 100,
                    "arrow_spacing": 20,
                    "word_spacing": 30
                }
            )
            # Shrink displaCy's default SVG heights for a compact layout
            svg_html = svg_html.replace('height="375"', 'height="200"')
            svg_html = re.sub(
                r'<svg[^>]*>',
                lambda m: m.group(0).replace('height="450"', 'height="300"'),
                svg_html
            )
            # Shift the drawing group upward to fill the reduced canvas
            svg_html = re.sub(
                r'<g [^>]*transform="translate\((\d+),(\d+)\)"',
                lambda m: f'<g transform="translate({m.group(1)},50)"',
                svg_html
            )
            # Wrap each sentence's SVG in a styled container
            diagram_html += f'<div class="arc-diagram-container">{svg_html}</div>'
        return diagram_html

    except Exception as e:
        logger.error(f"Error en display_arc_diagram: {str(e)}")
        return "<p style='color:red;'>Error generando diagrama</p>"
|
85 |
+
|
86 |
+
###########################################################################
|
87 |
+
def display_morphosyntax_interface(lang_code, nlp_models, morpho_t):
    """
    Main interface for arc-diagram visualization (Base Text vs. Iterations),
    including a vertical base-vs-iteration comparison at the bottom.

    Args:
        lang_code: key into ``nlp_models`` selecting the spaCy pipeline.
        nlp_models: mapping of language code -> loaded spaCy model.
        morpho_t: translation dict; labels here are hard-coded Spanish,
            the parameter is kept for signature consistency.
    """
    # CSS for a stable vertical layout
    st.markdown("""
        <style>
        .stTextArea textarea {
            font-size: 1rem;
            line-height: 1.5;
            min-height: 100px !important;
            height: 100px !important;
        }
        .arc-diagram-container {
            width: 100%;
            padding: 0.5rem;
            margin: 0.5rem 0;
        }
        .divider {
            height: 3px;
            border: none;
            background-color: #333;
            margin: 2rem 0;
        }
        </style>
    """, unsafe_allow_html=True)

    # 1) Initialize session state (no-op after the first run)
    initialize_arc_analysis_state()
    arc_state = st.session_state.arc_analysis_state

    # 2) Create the tabs: "Texto Base" and "Iteraciones"
    tabs = st.tabs(["Texto Base", "Iteraciones"])

    # =================== TAB 1: Base text ==========================
    with tabs[0]:
        st.subheader("Análisis de Texto Base")

        # Button to start a fresh analysis
        if st.button("Nuevo Análisis", key="btn_reset_base"):
            # We only clear the state; if a forced reload is needed,
            # uncomment the following line:
            # st.experimental_rerun()
            reset_arc_analysis_state()

        # Base-text textarea (value persisted in session state)
        arc_state["base_text"] = st.text_area(
            "Ingrese su texto inicial",
            value=arc_state["base_text"],
            key="base_text_input",
            height=150
        )

        # Button to analyze the base text
        if st.button("Analizar Texto Base", key="btn_analyze_base"):
            if not arc_state["base_text"].strip():
                st.warning("Ingrese un texto para analizar.")
            else:
                try:
                    # Run spaCy
                    doc = nlp_models[lang_code](arc_state["base_text"])
                    # Build the arc-diagram HTML
                    arc_html = display_arc_diagram(doc)
                    arc_state["base_diagram"] = arc_html

                    # Persist the analysis to Mongo
                    analysis = perform_advanced_morphosyntactic_analysis(
                        arc_state["base_text"],
                        nlp_models[lang_code]
                    )
                    base_id = store_student_morphosyntax_base(
                        username=st.session_state.username,
                        text=arc_state["base_text"],
                        arc_diagrams=analysis["arc_diagrams"]
                    )
                    if base_id:
                        arc_state["base_id"] = base_id
                        st.success(f"Análisis base guardado. ID: {base_id}")

                except Exception as exc:
                    st.error("Error procesando texto base")
                    logger.error(f"Error en análisis base: {str(exc)}")

        # Show the base diagram (if one has been produced)
        if arc_state["base_diagram"]:
            st.markdown("<hr class='divider'>", unsafe_allow_html=True)
            st.markdown("#### Diagrama de Arco (Texto Base)")
            st.write(arc_state["base_diagram"], unsafe_allow_html=True)

    # ================== TAB 2: Iterations ==========================
    with tabs[1]:
        st.subheader("Análisis de Cambios / Iteraciones")

        # A stored base analysis is required before iterating
        if not arc_state["base_id"]:
            st.info("Primero analiza un texto base en la pestaña anterior.")
            return

        # Show the base text as a read-only reference
        st.text_area(
            "Texto Base (solo lectura)",
            value=arc_state["base_text"],
            height=80,
            disabled=True
        )

        # Text box for the iteration
        arc_state["iteration_text"] = st.text_area(
            "Texto de Iteración",
            value=arc_state["iteration_text"],
            height=150
        )

        # Button to analyze the iteration
        if st.button("Analizar Cambios", key="btn_analyze_iteration"):
            if not arc_state["iteration_text"].strip():
                st.warning("Ingrese texto de iteración.")
            else:
                try:
                    # Run spaCy on the iteration text
                    doc_iter = nlp_models[lang_code](arc_state["iteration_text"])
                    arc_html_iter = display_arc_diagram(doc_iter)
                    arc_state["iteration_diagram"] = arc_html_iter

                    # Persist the iteration, linked to the base analysis
                    analysis_iter = perform_advanced_morphosyntactic_analysis(
                        arc_state["iteration_text"],
                        nlp_models[lang_code]
                    )
                    iteration_id = store_student_morphosyntax_iteration(
                        username=st.session_state.username,
                        base_id=arc_state["base_id"],
                        original_text=arc_state["base_text"],
                        iteration_text=arc_state["iteration_text"],
                        arc_diagrams=analysis_iter["arc_diagrams"]
                    )
                    if iteration_id:
                        st.success(f"Iteración guardada. ID: {iteration_id}")

                except Exception as exc:
                    st.error("Error procesando iteración")
                    logger.error(f"Error en iteración: {str(exc)}")

        # Show the iteration diagram
        if arc_state["iteration_diagram"]:
            st.markdown("<hr class='divider'>", unsafe_allow_html=True)
            st.markdown("#### Diagrama de Arco (Iteración)")
            st.write(arc_state["iteration_diagram"], unsafe_allow_html=True)

        # Vertical comparison (one diagram below the other)
        if arc_state["base_diagram"] and arc_state["iteration_diagram"]:
            st.markdown("<hr class='divider'>", unsafe_allow_html=True)
            st.markdown("### Comparación Vertical: Base vs. Iteración")

            st.markdown("**Diagrama Base**")
            st.write(arc_state["base_diagram"], unsafe_allow_html=True)

            st.markdown("---")
            st.markdown("**Diagrama Iterado**")
            st.write(arc_state["iteration_diagram"], unsafe_allow_html=True)
|
modules/morphosyntax/morphosyntax_process-Back1910-25-9-24.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_process.py
|
2 |
+
from ..text_analysis.morpho_analysis import perform_advanced_morphosyntactic_analysis
|
3 |
+
from ..database.database_oldFromV2 import store_morphosyntax_result
|
4 |
+
import streamlit as st
|
5 |
+
|
6 |
+
def process_morphosyntactic_input(user_input, lang_code, nlp_models, t):
    """
    Route a chat command either to the morphosyntactic analyzer or to the
    session chatbot.

    Commands of the form ``/analisis_morfosintactico [texto]`` run the
    advanced morphosyntactic analysis on the bracketed text; anything else
    is answered by ``st.session_state.morphosyntax_chatbot``.

    Args:
        user_input: raw chat message from the user.
        lang_code: key into ``nlp_models`` selecting the spaCy pipeline.
        nlp_models: mapping of language code -> loaded spaCy model.
        t: translation dict for response messages.

    Returns:
        tuple: (response_text, visualizations_or_None, raw_result_or_None)
    """
    if user_input.startswith('/analisis_morfosintactico'):
        # Extract the text between brackets. The original code indexed
        # split(...)[1] unconditionally, raising IndexError on a malformed
        # command with no '[' — report the analysis error instead.
        parts = user_input.split('[', 1)
        if len(parts) < 2:
            response = t.get('morphosyntactic_analysis_error', 'Error in morphosyntactic analysis')
            return response, None, None
        text_to_analyze = parts[1].rsplit(']', 1)[0]

        # Run the morphosyntactic analysis
        result = perform_advanced_morphosyntactic_analysis(text_to_analyze, nlp_models[lang_code])

        if result is None:
            response = t.get('morphosyntactic_analysis_error', 'Error in morphosyntactic analysis')
            return response, None, None

        # Prepare the success response
        response = t.get('morphosyntactic_analysis_completed', 'Morphosyntactic analysis completed')

        # Collect all arc diagrams produced by the analysis
        visualizations = result['arc_diagram']

        return response, visualizations, result
    else:
        # Any other input is simply answered by the chatbot
        chatbot = st.session_state.morphosyntax_chatbot
        response = chatbot.generate_response(user_input, lang_code)
        return response, None, None
|
modules/morphosyntax/morphosyntax_process.py
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_process.py
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
from ..text_analysis.morpho_analysis import (
|
5 |
+
get_repeated_words_colors,
|
6 |
+
highlight_repeated_words,
|
7 |
+
generate_arc_diagram,
|
8 |
+
get_detailed_pos_analysis,
|
9 |
+
get_morphological_analysis,
|
10 |
+
get_sentence_structure_analysis,
|
11 |
+
perform_advanced_morphosyntactic_analysis,
|
12 |
+
POS_COLORS,
|
13 |
+
POS_TRANSLATIONS
|
14 |
+
)
|
15 |
+
|
16 |
+
from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
|
17 |
+
|
18 |
+
import logging
|
19 |
+
logger = logging.getLogger(__name__)
|
20 |
+
|
21 |
+
|
22 |
+
def process_morphosyntactic_input(text, lang_code, nlp_models, t):
    """
    Run the full morphosyntactic analysis pipeline on the given text.

    Parses the text with the spaCy model selected by ``lang_code``, computes
    the advanced analysis, generates arc diagrams and repeated-word
    highlighting, persists the result for the logged-in student, and returns
    a result dict.

    Args:
        text: Text to analyze.
        lang_code: Language code used to pick the spaCy model.
        nlp_models: Dict mapping language codes to loaded spaCy models.
        t: Translations dict (may contain a 'MORPHOSYNTACTIC' section).

    Returns:
        dict with keys 'analysis', 'visualizations', 'highlighted_text',
        'success' and 'message'. On failure the first three are None and
        'success' is False.
    """
    try:
        # Hoist the model lookup; the same model is needed twice below.
        nlp = nlp_models[lang_code]

        # Parse once and reuse the Doc for every derived artifact.
        doc = nlp(text)

        # Advanced analysis (POS stats, morphology, sentence structure).
        analysis = perform_advanced_morphosyntactic_analysis(text, nlp)

        # Arc diagrams are generated from the Doc alone (no lang_code needed).
        arc_diagrams = generate_arc_diagram(doc)

        # Repeated words -> color map -> highlighted HTML text.
        word_colors = get_repeated_words_colors(doc)
        highlighted_text = highlight_repeated_words(doc, word_colors)

        # Persist the analysis for the current student.
        store_student_morphosyntax_result(
            st.session_state.username,
            text,
            {
                'arc_diagrams': arc_diagrams,
                'pos_analysis': analysis['pos_analysis'],
                'morphological_analysis': analysis['morphological_analysis'],
                'sentence_structure': analysis['sentence_structure']
            }
        )

        return {
            'analysis': analysis,
            'visualizations': arc_diagrams,
            'highlighted_text': highlighted_text,
            'success': True,
            'message': t.get('MORPHOSYNTACTIC', {}).get('success_message', 'Analysis completed successfully')
        }

    except Exception as e:
        logger.error(f"Error en el análisis morfosintáctico: {str(e)}")
        return {
            'analysis': None,
            'visualizations': None,
            'highlighted_text': None,
            'success': False,
            'message': t.get('MORPHOSYNTACTIC', {}).get('error_message', f'Error in analysis: {str(e)}')
        }
|
78 |
+
|
79 |
+
|
80 |
+
def format_analysis_results(analysis_result, t):
    """
    Format a morphosyntactic analysis result for display.

    Args:
        analysis_result: Dict produced by process_morphosyntactic_input,
            with keys 'success', 'message', 'analysis', 'visualizations'
            and 'highlighted_text'.
        t: Translations dict (may contain a 'MORPHOSYNTACTIC' section).

    Returns:
        dict with 'formatted_text', 'visualizations' and 'highlighted_text'.
        On a failed analysis, 'formatted_text' carries the error message and
        the other two values are None.
    """
    morpho_t = t.get('MORPHOSYNTACTIC', {})

    if not analysis_result['success']:
        return {
            'formatted_text': analysis_result['message'],
            'visualizations': None,
            # Keep the same key set as the success branch so callers can
            # always index 'highlighted_text' without a KeyError.
            'highlighted_text': None
        }

    formatted_sections = []

    # POS section: one bullet per part of speech with count, percentage
    # and example tokens.
    if 'pos_analysis' in analysis_result['analysis']:
        pos_section = [f"### {morpho_t.get('pos_analysis', 'Part of Speech Analysis')}"]
        for pos_item in analysis_result['analysis']['pos_analysis']:
            pos_section.append(
                f"- {morpho_t.get(pos_item['pos'], pos_item['pos'])}: "
                f"{pos_item['count']} ({pos_item['percentage']}%)\n "
                f"Ejemplos: {', '.join(pos_item['examples'])}"
            )
        formatted_sections.append('\n'.join(pos_section))

    # Additional sections (morphology, sentence structure) can be added here.

    return {
        'formatted_text': '\n\n'.join(formatted_sections),
        'visualizations': analysis_result['visualizations'],
        'highlighted_text': analysis_result['highlighted_text']
    }
|
119 |
+
|
120 |
+
# Re-exportar las funciones y constantes necesarias
|
121 |
+
__all__ = [
|
122 |
+
'process_morphosyntactic_input',
|
123 |
+
'highlight_repeated_words',
|
124 |
+
'generate_arc_diagram',
|
125 |
+
'get_repeated_words_colors',
|
126 |
+
'get_detailed_pos_analysis',
|
127 |
+
'get_morphological_analysis',
|
128 |
+
'get_sentence_structure_analysis',
|
129 |
+
'perform_advanced_morphosyntactic_analysis',
|
130 |
+
'POS_COLORS',
|
131 |
+
'POS_TRANSLATIONS'
|
132 |
+
]
|
modules/morphosyntax/morphosyntax_process_BackUp_Dec24_Ok.py
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#modules/morphosyntax/morphosyntax_process.py
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
from ..text_analysis.morpho_analysis import (
|
5 |
+
get_repeated_words_colors,
|
6 |
+
highlight_repeated_words,
|
7 |
+
generate_arc_diagram,
|
8 |
+
get_detailed_pos_analysis,
|
9 |
+
get_morphological_analysis,
|
10 |
+
get_sentence_structure_analysis,
|
11 |
+
perform_advanced_morphosyntactic_analysis,
|
12 |
+
POS_COLORS,
|
13 |
+
POS_TRANSLATIONS
|
14 |
+
)
|
15 |
+
|
16 |
+
from ..database.morphosintax_mongo_db import store_student_morphosyntax_result
|
17 |
+
|
18 |
+
import logging
|
19 |
+
logger = logging.getLogger(__name__)
|
20 |
+
|
21 |
+
|
22 |
+
def process_morphosyntactic_input(text, lang_code, nlp_models, t):
    """
    Run the full morphosyntactic analysis pipeline on the given text.

    Parses the text with the spaCy model selected by ``lang_code``, computes
    the advanced analysis, generates arc diagrams and repeated-word
    highlighting, persists the result for the logged-in student, and returns
    a result dict.

    Args:
        text: Text to analyze.
        lang_code: Language code used to pick the spaCy model.
        nlp_models: Dict mapping language codes to loaded spaCy models.
        t: Translations dict (may contain a 'MORPHOSYNTACTIC' section).

    Returns:
        dict with keys 'analysis', 'visualizations', 'highlighted_text',
        'success' and 'message'. On failure the first three are None and
        'success' is False.
    """
    try:
        # Hoist the model lookup; the same model is needed twice below.
        nlp = nlp_models[lang_code]

        # Parse once and reuse the Doc for every derived artifact.
        doc = nlp(text)

        # Advanced analysis (POS stats, morphology, sentence structure).
        analysis = perform_advanced_morphosyntactic_analysis(text, nlp)

        # Arc diagrams are generated from the Doc alone (no lang_code needed).
        arc_diagrams = generate_arc_diagram(doc)

        # Repeated words -> color map -> highlighted HTML text.
        word_colors = get_repeated_words_colors(doc)
        highlighted_text = highlight_repeated_words(doc, word_colors)

        # Persist the analysis for the current student.
        store_student_morphosyntax_result(
            st.session_state.username,
            text,
            {
                'arc_diagrams': arc_diagrams,
                'pos_analysis': analysis['pos_analysis'],
                'morphological_analysis': analysis['morphological_analysis'],
                'sentence_structure': analysis['sentence_structure']
            }
        )

        return {
            'analysis': analysis,
            'visualizations': arc_diagrams,
            'highlighted_text': highlighted_text,
            'success': True,
            'message': t.get('MORPHOSYNTACTIC', {}).get('success_message', 'Analysis completed successfully')
        }

    except Exception as e:
        logger.error(f"Error en el análisis morfosintáctico: {str(e)}")
        return {
            'analysis': None,
            'visualizations': None,
            'highlighted_text': None,
            'success': False,
            'message': t.get('MORPHOSYNTACTIC', {}).get('error_message', f'Error in analysis: {str(e)}')
        }
|
78 |
+
|
79 |
+
|
80 |
+
def format_analysis_results(analysis_result, t):
    """
    Format a morphosyntactic analysis result for display.

    Args:
        analysis_result: Dict produced by process_morphosyntactic_input,
            with keys 'success', 'message', 'analysis', 'visualizations'
            and 'highlighted_text'.
        t: Translations dict (may contain a 'MORPHOSYNTACTIC' section).

    Returns:
        dict with 'formatted_text', 'visualizations' and 'highlighted_text'.
        On a failed analysis, 'formatted_text' carries the error message and
        the other two values are None.
    """
    morpho_t = t.get('MORPHOSYNTACTIC', {})

    if not analysis_result['success']:
        return {
            'formatted_text': analysis_result['message'],
            'visualizations': None,
            # Keep the same key set as the success branch so callers can
            # always index 'highlighted_text' without a KeyError.
            'highlighted_text': None
        }

    formatted_sections = []

    # POS section: one bullet per part of speech with count, percentage
    # and example tokens.
    if 'pos_analysis' in analysis_result['analysis']:
        pos_section = [f"### {morpho_t.get('pos_analysis', 'Part of Speech Analysis')}"]
        for pos_item in analysis_result['analysis']['pos_analysis']:
            pos_section.append(
                f"- {morpho_t.get(pos_item['pos'], pos_item['pos'])}: "
                f"{pos_item['count']} ({pos_item['percentage']}%)\n "
                f"Ejemplos: {', '.join(pos_item['examples'])}"
            )
        formatted_sections.append('\n'.join(pos_section))

    # Additional sections (morphology, sentence structure) can be added here.

    return {
        'formatted_text': '\n\n'.join(formatted_sections),
        'visualizations': analysis_result['visualizations'],
        'highlighted_text': analysis_result['highlighted_text']
    }
|
119 |
+
|
120 |
+
# Re-exportar las funciones y constantes necesarias
|
121 |
+
__all__ = [
|
122 |
+
'process_morphosyntactic_input',
|
123 |
+
'highlight_repeated_words',
|
124 |
+
'generate_arc_diagram',
|
125 |
+
'get_repeated_words_colors',
|
126 |
+
'get_detailed_pos_analysis',
|
127 |
+
'get_morphological_analysis',
|
128 |
+
'get_sentence_structure_analysis',
|
129 |
+
'perform_advanced_morphosyntactic_analysis',
|
130 |
+
'POS_COLORS',
|
131 |
+
'POS_TRANSLATIONS'
|
132 |
+
]
|
modules/morphosyntax/txt.txt
ADDED
File without changes
|
modules/semantic/__init_.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# modules/semantic/__init_.py
# NOTE(review): this file's name has a single trailing underscore; a package
# initializer must be named __init__.py — confirm and rename, otherwise these
# re-exports are never executed when the package is imported.

from .semantic_interface import (
    display_semantic_interface,
    display_semantic_results
)
from .semantic_process import (
    process_semantic_input,
    format_semantic_results
)

# Public API of the semantic subpackage.
__all__ = [
    'display_semantic_interface',
    'display_semantic_results',
    'process_semantic_input',
    'format_semantic_results'
]
|
modules/semantic/__pycache__/flexible_analysis_handler.cpython-311.pyc
ADDED
Binary file (1.77 kB). View file
|
|
modules/semantic/__pycache__/semantic_float.cpython-311.pyc
ADDED
Binary file (5.45 kB). View file
|
|
modules/semantic/__pycache__/semantic_float68ok.cpython-311.pyc
ADDED
Binary file (4.87 kB). View file
|
|
modules/semantic/__pycache__/semantic_float86ok.cpython-311.pyc
ADDED
Binary file (4.57 kB). View file
|
|
modules/semantic/__pycache__/semantic_float_reset.cpython-311.pyc
ADDED
Binary file (3.37 kB). View file
|
|
modules/semantic/__pycache__/semantic_interface.cpython-311.pyc
ADDED
Binary file (1.1 kB). View file
|
|
modules/semantic/__pycache__/semantic_interfaceBackUp_2092024_1800.cpython-311.pyc
ADDED
Binary file (11.7 kB). View file
|
|
modules/semantic/__pycache__/semantic_interfaceBorrados.cpython-311.pyc
ADDED
Binary file (13.7 kB). View file
|
|