# modules/semantic/semantic_process.py
import streamlit as st
from ..text_analysis.semantic_analysis import (
    perform_semantic_analysis,
    fig_to_bytes,
    fig_to_html,
    identify_key_concepts,
    create_concept_graph,
    visualize_concept_graph,
    create_entity_graph,
    visualize_entity_graph,
    create_topic_graph,
    visualize_topic_graph,
    generate_summary,
    extract_entities,
    analyze_sentiment,
    extract_topics
)
from ..database.semantic_mongo_db import store_student_semantic_result
import logging

logger = logging.getLogger(__name__)
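
# Illustrative note (an assumption, since nlp_models is not built in this
# module): it is expected to be a dict mapping language codes to loaded
# spaCy pipelines, for example:
#
#     import spacy
#     nlp_models = {
#         'es': spacy.load('es_core_news_sm'),
#         'en': spacy.load('en_core_web_sm'),
#     }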

def process_semantic_input(text, lang_code, nlp_models, t):
    """
    Processes the submitted text and runs the semantic analysis.

    Args:
        text: Text to analyze
        lang_code: Language code
        nlp_models: Dictionary of spaCy models
        t: Dictionary of translations

    Returns:
        dict: Analysis results
    """
    try:
        # Run the full semantic analysis (perform_semantic_analysis applies the
        # spaCy pipeline itself, so the text does not need to be parsed here)
        analysis = perform_semantic_analysis(text, nlp_models[lang_code], lang_code)

        # Store the analysis in the database
        store_student_semantic_result(
            st.session_state.username,
            text,
            analysis
        )

        return {
            'analysis': analysis,
            'success': True,
            'message': t.get('success_message', 'Analysis completed successfully')
        }
    except Exception as e:
        logger.error(f"Semantic analysis error: {str(e)}")
        return {
            'analysis': None,
            'success': False,
            'message': t.get('error_message', f'Error in analysis: {str(e)}')
        }
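
# Example usage (illustrative sketch, assuming a Streamlit page where
# st.session_state.username is set, nlp_models holds loaded spaCy pipelines,
# and t is the page's translation dict):
#
#     result = process_semantic_input(user_text, 'es', nlp_models, t)
#     if result['success']:
#         st.success(result['message'])
#     else:
#         st.error(result['message'])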

def format_semantic_results(analysis_result, t):
    """
    Formats the analysis results for display.

    Args:
        analysis_result: Result of the semantic analysis
        t: Dictionary of translations

    Returns:
        dict: Results formatted for display
    """
    if not analysis_result['success']:
        return {
            'formatted_text': analysis_result['message'],
            'visualizations': None
        }

    # Format the results
    formatted_sections = []

    # Format key concepts
    if 'key_concepts' in analysis_result['analysis']:
        concepts_section = [f"### {t.get('key_concepts', 'Key Concepts')}"]
        concepts_section.extend([
            f"- {concept}: {frequency:.2f}"
            for concept, frequency in analysis_result['analysis']['key_concepts']
        ])
        formatted_sections.append('\n'.join(concepts_section))

    return {
        'formatted_text': '\n\n'.join(formatted_sections),
        'visualizations': {
            'concept_graph': analysis_result['analysis'].get('concept_graph'),
            'entity_graph': analysis_result['analysis'].get('entity_graph')
        }
    }
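
# Example usage (illustrative sketch, continuing the example above; the
# 'formatted_text' field is Markdown, and the graph entries are whatever
# perform_semantic_analysis stored under 'concept_graph' / 'entity_graph'):
#
#     formatted = format_semantic_results(result, t)
#     st.markdown(formatted['formatted_text'])
#     if formatted['visualizations'] and formatted['visualizations']['concept_graph']:
#         st.image(formatted['visualizations']['concept_graph'])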

# Re-export the required functions
__all__ = [
    'process_semantic_input',
    'format_semantic_results'
]
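
# Example import from a consumer module (hypothetical, assuming the package
# layout implied by the path comment and relative imports above):
#
#     from modules.semantic.semantic_process import (
#         process_semantic_input,
#         format_semantic_results,
#     )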