import streamlit as st
import sparknlp
import os
import pandas as pd
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text

# Page configuration
st.set_page_config(
    layout="wide",
    page_title="Spark NLP Demos App",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    """Start a Spark NLP session (cached across Streamlit reruns)."""
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model):
    """Build the YAKE keyword-extraction pipeline.

    The `model` argument is kept for interface parity with the other demos;
    YAKE is unsupervised, so no pretrained model is loaded here.
    """
    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    sentence_detector = SentenceDetector() \
        .setInputCols(['document']) \
        .setOutputCol('sentences')

    tokenizer = Tokenizer() \
        .setInputCols(['sentences']) \
        .setOutputCol('tokens') \
        .setContextChars(['(', ')', '?', '!', '.', ','])

    keywords = YakeKeywordExtraction() \
        .setInputCols(['tokens']) \
        .setOutputCol('keywords') \
        .setMinNGrams(2) \
        .setMaxNGrams(5) \
        .setNKeywords(100) \
        .setStopWords(StopWordsCleaner().getStopWords())

    pipeline = Pipeline(stages=[
        document_assembler,
        sentence_detector,
        tokenizer,
        keywords
    ])

    return pipeline

def fit_data(pipeline, data):
    """Fit the pipeline on an empty frame, then annotate `data` with a LightPipeline."""
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    results = model.fullAnnotate(data)[0]
    return results

def highlight_keywords(data):
    """Render the document text with extracted keywords highlighted inline."""
    document_text = data["document"][0].result
    keywords = data["keywords"]
    annotations = []
    last_index = 0

    for keyword in keywords:
        keyword_text = keyword.result
        start_index = document_text.find(keyword_text, last_index)
        if start_index != -1:
            # Emit the plain text between the previous keyword and this one.
            if start_index > last_index:
                annotations.append(document_text[last_index:start_index])
            annotations.append((keyword_text, 'Key Word'))
            last_index = start_index + len(keyword_text)

    # Append any trailing text after the last matched keyword.
    if last_index < len(document_text):
        annotations.append(document_text[last_index:])

    annotated_text(*annotations)

# Set up the page layout
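# The rest of the app was truncated in the source (it ends mid-call at
# `st.markdown('`). Below is a minimal sketch of the usual Streamlit flow that
# wires the functions above together, assuming a title banner, a text area for
# input, and the highlighted output. The title text, sidebar label, and sample
# sentence are illustrative placeholders, not the original content.
st.markdown('<div class="main-title">Keyword Extraction with YAKE</div>', unsafe_allow_html=True)

# Start the (cached) Spark session referenced as a global by fit_data.
spark = init_spark()

# Placeholder model selector; YAKE itself needs no pretrained model.
model = st.sidebar.selectbox('Choose the model', ['yake'])

default_text = "Spark NLP is an open-source text processing library for Python, Java, and Scala."
text = st.text_area('Enter text to extract keywords from:', default_text)

if text:
    pipeline = create_pipeline(model)
    output = fit_data(pipeline, text)
    highlight_keywords(output)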