Update modules/ui/ui.py
Browse files- modules/ui/ui.py +215 -182
modules/ui/ui.py
CHANGED
@@ -580,181 +580,182 @@ def display_morphosyntax_analysis_interface(nlp_models, lang_code):
|
|
580 |
# Análisis morfosintáctico avanzado
|
581 |
advanced_analysis = perform_advanced_morphosyntactic_analysis(current_input, nlp_models[lang_code])
|
582 |
|
583 |
-
#
|
584 |
-
st.
|
585 |
-
|
586 |
-
|
587 |
-
|
588 |
-
|
589 |
-
|
590 |
-
st.
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
597 |
|
598 |
-
#
|
599 |
-
|
600 |
-
for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']):
|
601 |
-
sentence_str = (
|
602 |
-
f"**{t['sentence']} {i+1}** "
|
603 |
-
f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- "
|
604 |
-
f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- "
|
605 |
-
f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- "
|
606 |
-
f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}"
|
607 |
-
)
|
608 |
-
st.markdown(sentence_str)
|
609 |
-
|
610 |
-
# Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico
|
611 |
-
# Mostrar análisis de categorías gramaticales # Mostrar análisis morfológico
|
612 |
-
col1, col2 = st.columns(2)
|
613 |
|
614 |
-
|
615 |
-
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
-
|
621 |
-
# Renombrar las columnas para mayor claridad
|
622 |
-
pos_df = pos_df.rename(columns={
|
623 |
-
'pos': t['grammatical_category'],
|
624 |
-
'count': t['count'],
|
625 |
-
'percentage': t['percentage'],
|
626 |
-
'examples': t['examples']
|
627 |
-
})
|
628 |
-
|
629 |
-
# Mostrar el dataframe
|
630 |
-
st.dataframe(pos_df)
|
631 |
|
632 |
-
|
633 |
-
|
634 |
-
|
635 |
-
|
636 |
-
|
637 |
-
|
638 |
-
|
639 |
-
|
640 |
-
|
641 |
-
|
642 |
-
|
643 |
-
|
644 |
-
|
645 |
-
|
646 |
-
|
647 |
-
|
648 |
-
|
649 |
-
|
650 |
-
|
651 |
-
|
652 |
-
|
653 |
-
|
654 |
-
|
655 |
-
|
656 |
-
|
657 |
-
|
658 |
-
|
659 |
-
|
660 |
-
|
661 |
-
|
662 |
-
|
663 |
-
|
664 |
-
|
665 |
-
|
666 |
-
|
667 |
-
|
668 |
-
|
669 |
-
|
670 |
-
|
671 |
-
|
672 |
-
|
673 |
-
|
674 |
-
|
675 |
-
|
676 |
-
|
677 |
-
|
678 |
-
|
679 |
-
|
680 |
-
|
681 |
-
|
682 |
-
|
683 |
-
|
684 |
-
|
685 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
686 |
}
|
687 |
-
|
688 |
-
|
689 |
-
|
690 |
-
|
691 |
-
morph_translations = {
|
692 |
-
'es': {
|
693 |
-
'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido',
|
694 |
-
'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo',
|
695 |
-
'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz',
|
696 |
-
'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural',
|
697 |
-
'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Imp': 'Imperativo', 'Inf': 'Infinitivo',
|
698 |
-
'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado',
|
699 |
-
'Fut': 'Futuro', 'Perf': 'Perfecto', 'Imp': 'Imperfecto'
|
700 |
-
},
|
701 |
-
'en': {
|
702 |
-
'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person',
|
703 |
-
'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice',
|
704 |
-
'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative',
|
705 |
-
'Sub': 'Subjunctive', 'Imp': 'Imperative', 'Inf': 'Infinitive', 'Part': 'Participle',
|
706 |
-
'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect', 'Imp': 'Imperfect'
|
707 |
-
},
|
708 |
-
'fr': {
|
709 |
-
'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom',
|
710 |
-
'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix',
|
711 |
-
'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif',
|
712 |
-
'Sub': 'Subjonctif', 'Imp': 'Impératif', 'Inf': 'Infinitif', 'Part': 'Participe',
|
713 |
-
'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait', 'Imp': 'Imparfait'
|
714 |
-
}
|
715 |
-
}
|
716 |
-
for key, value in morph_translations[lang_code].items():
|
717 |
-
morph_string = morph_string.replace(key, value)
|
718 |
-
return morph_string
|
719 |
-
|
720 |
-
morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code))
|
721 |
-
|
722 |
-
# Seleccionar y ordenar las columnas a mostrar
|
723 |
-
columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']]
|
724 |
-
columns_to_display = [col for col in columns_to_display if col in morph_df.columns]
|
725 |
-
|
726 |
-
# Mostrar el DataFrame
|
727 |
-
st.dataframe(morph_df[columns_to_display])
|
728 |
-
|
729 |
-
# Mostrar diagramas de arco (código existente)
|
730 |
-
with st.expander(t['arc_diagram'], expanded=True):
|
731 |
-
sentences = list(doc.sents)
|
732 |
-
arc_diagrams = []
|
733 |
-
for i, sent in enumerate(sentences):
|
734 |
-
st.subheader(f"{t['sentence']} {i+1}")
|
735 |
-
html = displacy.render(sent, style="dep", options={"distance": 100})
|
736 |
-
html = html.replace('height="375"', 'height="200"')
|
737 |
-
html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)
|
738 |
-
html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"', lambda m: f'<g transform="translate({m.group(1)},50)"', html)
|
739 |
-
st.write(html, unsafe_allow_html=True)
|
740 |
-
arc_diagrams.append(html)
|
741 |
|
742 |
-
|
743 |
-
|
744 |
-
|
745 |
-
|
746 |
-
|
747 |
-
|
748 |
-
|
749 |
-
|
750 |
-
|
751 |
-
|
752 |
-
|
753 |
-
|
754 |
-
|
755 |
-
|
756 |
-
|
757 |
-
|
|
|
|
|
|
|
|
|
|
|
758 |
|
759 |
###############################################################################################################
|
760 |
def display_semantic_analysis_interface(nlp_models, lang_code):
|
@@ -824,14 +825,15 @@ def display_semantic_analysis_interface(nlp_models, lang_code):
|
|
824 |
# Realizar el análisis
|
825 |
analysis_result = perform_semantic_analysis(text_content, nlp_models[lang_code], lang_code)
|
826 |
|
827 |
-
|
828 |
-
|
829 |
-
|
830 |
-
|
|
|
831 |
|
832 |
-
|
833 |
-
|
834 |
-
|
835 |
|
836 |
# Guardar el resultado del análisis
|
837 |
if store_semantic_result(st.session_state.username, text_content, analysis_result):
|
@@ -879,10 +881,8 @@ def display_discourse_analysis_interface(nlp_models, lang_code):
|
|
879 |
st.header(t['title'])
|
880 |
|
881 |
col1, col2 = st.columns(2)
|
882 |
-
|
883 |
with col1:
|
884 |
uploaded_file1 = st.file_uploader(t['file_uploader1'], type=['txt'])
|
885 |
-
|
886 |
with col2:
|
887 |
uploaded_file2 = st.file_uploader(t['file_uploader2'], type=['txt'])
|
888 |
|
@@ -894,16 +894,49 @@ def display_discourse_analysis_interface(nlp_models, lang_code):
|
|
894 |
# Realizar el análisis
|
895 |
analysis_result = perform_discourse_analysis(text_content1, text_content2, nlp_models[lang_code], lang_code)
|
896 |
|
|
|
|
|
|
|
897 |
# Mostrar los resultados del análisis
|
898 |
-
|
899 |
|
900 |
# Guardar el resultado del análisis
|
901 |
if store_discourse_analysis_result(st.session_state.username, text_content1, text_content2, analysis_result):
|
902 |
st.success(t['success_message'])
|
903 |
else:
|
904 |
st.error(t['error_message'])
|
905 |
-
|
906 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
907 |
|
908 |
##################################################################################################
|
909 |
#def display_saved_discourse_analysis(analysis_data):
|
|
|
580 |
# Análisis morfosintáctico avanzado
|
581 |
advanced_analysis = perform_advanced_morphosyntactic_analysis(current_input, nlp_models[lang_code])
|
582 |
|
583 |
+
# Guardar el resultado en el estado de la sesión
|
584 |
+
st.session_state.morphosyntax_result = {
|
585 |
+
'doc': doc,
|
586 |
+
'advanced_analysis': advanced_analysis
|
587 |
+
}
|
588 |
+
|
589 |
+
# Mostrar resultados
|
590 |
+
display_morphosyntax_results(st.session_state.morphosyntax_result, lang_code, t)
|
591 |
+
|
592 |
+
elif 'morphosyntax_result' in st.session_state:
|
593 |
+
# Si hay un resultado guardado, mostrarlo
|
594 |
+
display_morphosyntax_results(st.session_state.morphosyntax_result, lang_code, t)
|
595 |
+
else:
|
596 |
+
st.warning(t['warning_message'])
|
597 |
+
|
598 |
+
def display_morphosyntax_results(result, lang_code, t):
    """Render a stored morphosyntactic analysis.

    Shows, in order: the POS color legend, repeated-word highlighting,
    per-sentence structure summaries, side-by-side POS and morphology
    tables, and dependency arc diagrams.

    Args:
        result: dict with 'doc' (spaCy Doc) and 'advanced_analysis'
            (dict produced by perform_advanced_morphosyntactic_analysis).
        lang_code: UI language key ('es', 'en' or 'fr').
        t: translation dict for UI labels.
    """
    doc = result['doc']
    advanced_analysis = result['advanced_analysis']

    # POS color legend (only tags that have a translation for this language)
    st.markdown(f"##### {t['legend']}")
    legend_html = "<div style='display: flex; flex-wrap: wrap;'>"
    for pos, color in POS_COLORS.items():
        if pos in POS_TRANSLATIONS[lang_code]:
            legend_html += f"<div style='margin-right: 10px;'><span style='background-color: {color}; padding: 2px 5px;'>{POS_TRANSLATIONS[lang_code][pos]}</span></div>"
    legend_html += "</div>"
    st.markdown(legend_html, unsafe_allow_html=True)

    # Repeated-word highlighting
    word_colors = get_repeated_words_colors(doc)
    with st.expander(t['repeated_words'], expanded=True):
        highlighted_text = highlight_repeated_words(doc, word_colors)
        st.markdown(highlighted_text, unsafe_allow_html=True)

    # Sentence structure: root, subjects, objects and verbs per sentence
    with st.expander(t['sentence_structure'], expanded=True):
        for i, sent_analysis in enumerate(advanced_analysis['sentence_structure']):
            sentence_str = (
                f"**{t['sentence']} {i+1}** "
                f"{t['root']}: {sent_analysis['root']} ({sent_analysis['root_pos']}) -- "
                f"{t['subjects']}: {', '.join(sent_analysis['subjects'])} -- "
                f"{t['objects']}: {', '.join(sent_analysis['objects'])} -- "
                f"{t['verbs']}: {', '.join(sent_analysis['verbs'])}"
            )
            st.markdown(sentence_str)

    # POS analysis and morphological analysis, side by side
    col1, col2 = st.columns(2)

    with col1:
        with st.expander(t['pos_analysis'], expanded=True):
            pos_df = pd.DataFrame(advanced_analysis['pos_analysis'])

            # Translate POS tags to their names in the selected language
            pos_df['pos'] = pos_df['pos'].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # Rename the columns for clarity
            pos_df = pos_df.rename(columns={
                'pos': t['grammatical_category'],
                'count': t['count'],
                'percentage': t['percentage'],
                'examples': t['examples']
            })

            # Show the dataframe
            st.dataframe(pos_df)

    with col2:
        with st.expander(t['morphological_analysis'], expanded=True):
            morph_df = pd.DataFrame(advanced_analysis['morphological_analysis'])

            # Map raw spaCy column names to localized headers
            column_mapping = {
                'text': t['word'],
                'lemma': t['lemma'],
                'pos': t['grammatical_category'],
                'dep': t['dependency'],
                'morph': t['morphology']
            }

            # Rename only the columns that are actually present
            morph_df = morph_df.rename(columns={col: new_name for col, new_name in column_mapping.items() if col in morph_df.columns})

            # Translate the grammatical categories
            morph_df[t['grammatical_category']] = morph_df[t['grammatical_category']].map(lambda x: POS_TRANSLATIONS[lang_code].get(x, x))

            # Translate the dependency labels
            dep_translations = {
                'es': {
                    'ROOT': 'RAÍZ', 'nsubj': 'sujeto nominal', 'obj': 'objeto', 'iobj': 'objeto indirecto',
                    'csubj': 'sujeto clausal', 'ccomp': 'complemento clausal', 'xcomp': 'complemento clausal abierto',
                    'obl': 'oblicuo', 'vocative': 'vocativo', 'expl': 'expletivo', 'dislocated': 'dislocado',
                    'advcl': 'cláusula adverbial', 'advmod': 'modificador adverbial', 'discourse': 'discurso',
                    'aux': 'auxiliar', 'cop': 'cópula', 'mark': 'marcador', 'nmod': 'modificador nominal',
                    'appos': 'aposición', 'nummod': 'modificador numeral', 'acl': 'cláusula adjetiva',
                    'amod': 'modificador adjetival', 'det': 'determinante', 'clf': 'clasificador',
                    'case': 'caso', 'conj': 'conjunción', 'cc': 'coordinante', 'fixed': 'fijo',
                    'flat': 'plano', 'compound': 'compuesto', 'list': 'lista', 'parataxis': 'parataxis',
                    'orphan': 'huérfano', 'goeswith': 'va con', 'reparandum': 'reparación', 'punct': 'puntuación'
                },
                'en': {
                    'ROOT': 'ROOT', 'nsubj': 'nominal subject', 'obj': 'object',
                    'iobj': 'indirect object', 'csubj': 'clausal subject', 'ccomp': 'clausal complement', 'xcomp': 'open clausal complement',
                    'obl': 'oblique', 'vocative': 'vocative', 'expl': 'expletive', 'dislocated': 'dislocated', 'advcl': 'adverbial clause modifier',
                    'advmod': 'adverbial modifier', 'discourse': 'discourse element', 'aux': 'auxiliary', 'cop': 'copula', 'mark': 'marker',
                    'nmod': 'nominal modifier', 'appos': 'appositional modifier', 'nummod': 'numeric modifier', 'acl': 'clausal modifier of noun',
                    'amod': 'adjectival modifier', 'det': 'determiner', 'clf': 'classifier', 'case': 'case marking',
                    'conj': 'conjunct', 'cc': 'coordinating conjunction', 'fixed': 'fixed multiword expression',
                    'flat': 'flat multiword expression', 'compound': 'compound', 'list': 'list', 'parataxis': 'parataxis', 'orphan': 'orphan',
                    'goeswith': 'goes with', 'reparandum': 'reparandum', 'punct': 'punctuation'
                },
                'fr': {
                    'ROOT': 'RACINE', 'nsubj': 'sujet nominal', 'obj': 'objet', 'iobj': 'objet indirect',
                    'csubj': 'sujet phrastique', 'ccomp': 'complément phrastique', 'xcomp': 'complément phrastique ouvert', 'obl': 'oblique',
                    'vocative': 'vocatif', 'expl': 'explétif', 'dislocated': 'disloqué', 'advcl': 'clause adverbiale', 'advmod': 'modifieur adverbial',
                    'discourse': 'élément de discours', 'aux': 'auxiliaire', 'cop': 'copule', 'mark': 'marqueur', 'nmod': 'modifieur nominal',
                    'appos': 'apposition', 'nummod': 'modifieur numéral', 'acl': 'clause relative', 'amod': 'modifieur adjectival', 'det': 'déterminant',
                    'clf': 'classificateur', 'case': 'marqueur de cas', 'conj': 'conjonction', 'cc': 'coordination', 'fixed': 'expression figée',
                    'flat': 'construction plate', 'compound': 'composé', 'list': 'liste', 'parataxis': 'parataxe', 'orphan': 'orphelin',
                    'goeswith': 'va avec', 'reparandum': 'réparation', 'punct': 'ponctuation'
                }
            }
            morph_df[t['dependency']] = morph_df[t['dependency']].map(lambda x: dep_translations[lang_code].get(x, x))

            # Translations for morph feature names and values.
            # NOTE: 'Imp' is deliberately NOT in these dicts — it is ambiguous
            # (Mood=Imp is imperative, Tense=Imp is imperfect). The previous
            # version listed 'Imp' twice per language, so the first entry was
            # silently discarded by the dict literal; it is resolved per
            # feature in translate_morph below.
            morph_translations = {
                'es': {
                    'Gender': 'Género', 'Number': 'Número', 'Case': 'Caso', 'Definite': 'Definido',
                    'PronType': 'Tipo de Pronombre', 'Person': 'Persona', 'Mood': 'Modo',
                    'Tense': 'Tiempo', 'VerbForm': 'Forma Verbal', 'Voice': 'Voz',
                    'Fem': 'Femenino', 'Masc': 'Masculino', 'Sing': 'Singular', 'Plur': 'Plural',
                    'Ind': 'Indicativo', 'Sub': 'Subjuntivo', 'Inf': 'Infinitivo',
                    'Part': 'Participio', 'Ger': 'Gerundio', 'Pres': 'Presente', 'Past': 'Pasado',
                    'Fut': 'Futuro', 'Perf': 'Perfecto'
                },
                'en': {
                    'Gender': 'Gender', 'Number': 'Number', 'Case': 'Case', 'Definite': 'Definite', 'PronType': 'Pronoun Type', 'Person': 'Person',
                    'Mood': 'Mood', 'Tense': 'Tense', 'VerbForm': 'Verb Form', 'Voice': 'Voice',
                    'Fem': 'Feminine', 'Masc': 'Masculine', 'Sing': 'Singular', 'Plur': 'Plural', 'Ind': 'Indicative',
                    'Sub': 'Subjunctive', 'Inf': 'Infinitive', 'Part': 'Participle',
                    'Ger': 'Gerund', 'Pres': 'Present', 'Past': 'Past', 'Fut': 'Future', 'Perf': 'Perfect'
                },
                'fr': {
                    'Gender': 'Genre', 'Number': 'Nombre', 'Case': 'Cas', 'Definite': 'Défini', 'PronType': 'Type de Pronom',
                    'Person': 'Personne', 'Mood': 'Mode', 'Tense': 'Temps', 'VerbForm': 'Forme Verbale', 'Voice': 'Voix',
                    'Fem': 'Féminin', 'Masc': 'Masculin', 'Sing': 'Singulier', 'Plur': 'Pluriel', 'Ind': 'Indicatif',
                    'Sub': 'Subjonctif', 'Inf': 'Infinitif', 'Part': 'Participe',
                    'Ger': 'Gérondif', 'Pres': 'Présent', 'Past': 'Passé', 'Fut': 'Futur', 'Perf': 'Parfait'
                }
            }

            # Context-dependent translation of the ambiguous 'Imp' value,
            # keyed by the feature it belongs to.
            imp_by_feature = {
                'es': {'Mood': 'Imperativo', 'Tense': 'Imperfecto'},
                'en': {'Mood': 'Imperative', 'Tense': 'Imperfect'},
                'fr': {'Mood': 'Impératif', 'Tense': 'Imparfait'}
            }

            def translate_morph(morph_string, lang_code):
                """Translate a spaCy morph string such as 'Mood=Ind|Tense=Pres'.

                BUG FIX: the previous implementation ran str.replace over the
                whole string for every dictionary entry, which corrupted
                already-translated text (e.g. in English 'Ger' -> 'Gerund'
                rewrote the middle of 'Gender' into 'Gerundnder'). Mapping
                whole Key=Value tokens avoids any cascading substitution.
                """
                table = morph_translations[lang_code]
                translated = []
                for feature in morph_string.split('|'):
                    if not feature:
                        continue
                    key, sep, value = feature.partition('=')
                    if sep:
                        if value == 'Imp':
                            new_value = imp_by_feature[lang_code].get(key, value)
                        else:
                            new_value = table.get(value, value)
                        translated.append(f"{table.get(key, key)}={new_value}")
                    else:
                        # Bare token with no '=' — translate it as-is if known
                        translated.append(table.get(key, key))
                return '|'.join(translated)

            morph_df[t['morphology']] = morph_df[t['morphology']].apply(lambda x: translate_morph(x, lang_code))

            # Select and order the columns to display (only those present)
            columns_to_display = [t['word'], t['lemma'], t['grammatical_category'], t['dependency'], t['morphology']]
            columns_to_display = [col for col in columns_to_display if col in morph_df.columns]

            # Show the DataFrame
            st.dataframe(morph_df[columns_to_display])

    # Dependency arc diagrams, one per sentence
    with st.expander(t['arc_diagram'], expanded=True):
        sentences = list(doc.sents)
        arc_diagrams = []
        for i, sent in enumerate(sentences):
            st.subheader(f"{t['sentence']} {i+1}")
            html = displacy.render(sent, style="dep", options={"distance": 100})
            # Shrink the SVG and shift the drawing up so it fits the page
            html = html.replace('height="375"', 'height="200"')
            html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)
            html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"', lambda m: f'<g transform="translate({m.group(1)},50)"', html)
            st.write(html, unsafe_allow_html=True)
            arc_diagrams.append(html)
|
759 |
|
760 |
###############################################################################################################
|
761 |
def display_semantic_analysis_interface(nlp_models, lang_code):
|
|
|
825 |
# Realizar el análisis
|
826 |
analysis_result = perform_semantic_analysis(text_content, nlp_models[lang_code], lang_code)
|
827 |
|
828 |
+
def display_semantic_results(result, lang_code, t):
    """Render a semantic analysis result.

    Args:
        result: dict with 'key_concepts' (iterable of (concept, frequency)
            pairs) and 'relations_graph' (figure accepted by st.pyplot).
        lang_code: UI language key (unused here; kept so the signature
            matches the other display_* helpers).
        t: translation dict for UI labels.
    """
    # Show key concepts with their frequencies.
    # BUG FIX: the body previously referenced the undefined name
    # 'analysis_result'; the parameter is 'result' (NameError at runtime).
    with st.expander(t['key_concepts'], expanded=True):
        concept_text = " | ".join([f"{concept} ({frequency:.2f})" for concept, frequency in result['key_concepts']])
        st.write(concept_text)

    # Show the conceptual-relations graph
    with st.expander(t['conceptual_relations'], expanded=True):
        st.pyplot(result['relations_graph'])
|
837 |
|
838 |
# Guardar el resultado del análisis
|
839 |
if store_semantic_result(st.session_state.username, text_content, analysis_result):
|
|
|
881 |
st.header(t['title'])
|
882 |
|
883 |
col1, col2 = st.columns(2)
|
|
|
884 |
with col1:
|
885 |
uploaded_file1 = st.file_uploader(t['file_uploader1'], type=['txt'])
|
|
|
886 |
with col2:
|
887 |
uploaded_file2 = st.file_uploader(t['file_uploader2'], type=['txt'])
|
888 |
|
|
|
894 |
# Realizar el análisis
|
895 |
analysis_result = perform_discourse_analysis(text_content1, text_content2, nlp_models[lang_code], lang_code)
|
896 |
|
897 |
+
# Guardar el resultado en el estado de la sesión
|
898 |
+
st.session_state.discourse_result = analysis_result
|
899 |
+
|
900 |
# Mostrar los resultados del análisis
|
901 |
+
display_discourse_results(st.session_state.discourse_result, lang_code, t)
|
902 |
|
903 |
# Guardar el resultado del análisis
|
904 |
if store_discourse_analysis_result(st.session_state.username, text_content1, text_content2, analysis_result):
|
905 |
st.success(t['success_message'])
|
906 |
else:
|
907 |
st.error(t['error_message'])
|
908 |
+
|
909 |
+
elif 'discourse_result' in st.session_state:
|
910 |
+
# Si hay un resultado guardado, mostrarlo
|
911 |
+
display_discourse_results(st.session_state.discourse_result, lang_code, t)
|
912 |
+
else:
|
913 |
+
st.warning(t['warning_message'])
|
914 |
+
|
915 |
+
def display_discourse_results(result, lang_code, t):
    """Render the comparative discourse analysis of two documents.

    Args:
        result: dict with 'graph1'/'graph2' (figures for st.pyplot) and
            'key_concepts1'/'key_concepts2' (iterables of
            (concept, frequency) pairs).
        lang_code: UI language key (unused here; kept for interface
            consistency with the other display_* helpers).
        t: translation dict for UI labels.
    """
    left_col, right_col = st.columns(2)

    def _render_document(column, label_key, graph_key, concepts_key):
        # One document per column: its relations graph followed by the
        # key concepts and their frequencies.
        with column:
            with st.expander(t[label_key], expanded=True):
                st.pyplot(result[graph_key])
                st.subheader(t['comparison'])
                summary = " | ".join(
                    f"{concept} ({frequency:.2f})"
                    for concept, frequency in result[concepts_key]
                )
                st.write(summary)

    _render_document(left_col, 'file_uploader1', 'graph1', 'key_concepts1')
    _render_document(right_col, 'file_uploader2', 'graph2', 'key_concepts2')

    # Side-by-side table of key-concept frequencies; a concept missing
    # from one document shows as 0.
    st.subheader(t['comparison'])
    comparison_table = pd.DataFrame({
        t['file_uploader1']: dict(result['key_concepts1']),
        t['file_uploader2']: dict(result['key_concepts2'])
    }).fillna(0)
    st.dataframe(comparison_table)
|
940 |
|
941 |
##################################################################################################
|
942 |
#def display_saved_discourse_analysis(analysis_data):
|