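"""Streamlit demo: named entity recognition (NER) for Hebrew text with Spark NLP.

Launch with `streamlit run <this-file>.py` (the file name is assumed; it is not
given in the source).
"""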
import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text
# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)
# CSS for styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)
@st.cache_resource
def init_spark():
    return sparknlp.start()
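
# Build the six-stage Spark NLP pipeline: document assembly, sentence
# detection, tokenization, pretrained Hebrew word embeddings (hebrew_cc_300d),
# the pretrained NER model, and a converter that groups tags into chunks.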
@st.cache_resource
def create_pipeline(model):
    documentAssembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    sentence_detector = SentenceDetector() \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    tokenizer = Tokenizer() \
        .setInputCols(["sentence"]) \
        .setOutputCol("token")

    word_embeddings = WordEmbeddingsModel.pretrained("hebrew_cc_300d", "he") \
        .setInputCols(["sentence", "token"]) \
        .setOutputCol("embeddings")

    # Use the model name chosen in the sidebar instead of a hard-coded value.
    ner = NerDLModel.pretrained(model, "he") \
        .setInputCols(["sentence", "token", "embeddings"]) \
        .setOutputCol("ner")

    ner_converter = NerConverter() \
        .setInputCols(["sentence", "token", "ner"]) \
        .setOutputCol("ner_chunk")

    pipeline = Pipeline(stages=[documentAssembler, sentence_detector, tokenizer, word_embeddings, ner, ner_converter])
    return pipeline
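
# Fit the pipeline on an empty DataFrame (the pretrained stages need no
# training data) and wrap it in a LightPipeline for fast single-string
# annotation of the input text.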
def fit_data(pipeline, data):
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    result = model.fullAnnotate(data)
    return result
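
# Interleave plain text with (chunk, label) tuples for annotated_text().
# The chunks are assumed to occur in the document in order, so the running
# `document` string is consumed left to right as each chunk is matched.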
def annotate(data):
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)
# Set up the page layout
st.markdown('<div class="main-title">Recognize entities in Hebrew text</div>', unsafe_allow_html=True)
st.markdown(""" | |
<div class="section"> | |
<p>Named Entity Recognition (NER) models identify and categorize important entities in a text. This page details a word embeddings-based NER model for Hebrew texts, using the <code>hebrew_cc_300d</code> word embeddings. The model is pretrained and available for use with Spark NLP.</p> | |
</div> | |
""", unsafe_allow_html=True) | |
# Sidebar content
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ["hebrewner_cc_300d"],
    help="For more info about the models visit: https://sparknlp.org/models"
)
# Reference notebook link in sidebar
link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/public/NER_HE.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)
# Load examples (Hebrew sample texts for the NER model)
examples = [
    """ืืืชืืฆืื : ืกืคืจื ืืคื ืืจื ืืืจ ืขื ืง ืืืกืืก ืืืืืืืืื ืชืืืืืืืืื ืืืืื ืื ื ืืขืืื , ืืื ืื ืืชืงืคืืช ืืืืฉืืืช ืืืคื ืืจืืื ืืืืื ืืื ืกืืื ืืคื ืฉืืขืืื ืื ืืชืขืืจืจื ืืชืืฆืื ืืกืคืจืืื ืฉื ืืืืืืก ืื ืืืืืื , ืืืฃ ืืจื ืืกืืคืจ ืืฆืืื ืืืืืช ืขืฆืื , ืื ืืืจืกืืื , ืืขืจืื ืืช ืืกืคืจ " ืืกืืืืช ืฉืืืืืจื ืฆืืคื ืื ืืื ืฆ'ื " , ืฉืื ืืื ืืืืง ืืืช ืืืืช ืืช ืืขืืืืืช ืืืื ืืืช ืฉืขืืืื ืืกืชืื ืืจืืื ืขื ืืื ืฉืคืข ืฉื ืืืืจืื , ืืืงื ืืงืืจืืื ืืืืงื ืืงืืืื ืืกืคืจืื , ืืชืื ืขืช ืืจืืืื ืืช ืขื ืืืงืจืื ืฉืื ืื .""",
    """ืืืื ืงืืฆืจ ืืืจืืขื ืื ื ืชืขืกืง ืืื ืืื ืื ืืฉืืื ืืืืืื ืื ืฉืืื ืื ืืกืคืจ , ืืื ืืืฉื ืืืืชื ืฉื ืืจืื ืืืืืืืช , ืืืขืืช ืืืืืืจืืืช ืฉื ืืืืื ืจืื ืื ืืื ืฆื ืืื ืืืื , ืืื ื ืชืืงื ืื ืืฉื ืืื - ืืืืืืช ืืกืชืจ " ืืกืืจ ืฆืืื " - ืืกืืจ ืืฉืื ืืงืืื ืืืืืื ืืื ืืืฃ ืฉื ื , ืืชืคืงืืื ืืืื ืขื ืฆืืฆืื ืืฉืืฉืืช ืื ึถืจืื ึผืื ืืืช ืืงืืืื ืฉื ืฆืจืคืช , ืฉืื ืืืขืฉื ืฆืืฆืื ืืฉืืข ืืืจืื ืืืืืืืช , ืืืคืืื ืื , ืืืขืช ืืืจื ืืืกืืจ , ืืฉืืฉืืช ืืืืืืชืืช ืืืืืืืืืช ืฉื ืฆืจืคืช , ืื ืฉืืืืจ ืืืืื ืฉืืืื ืฆืจืคืช ืื ืืืืฆื ืืืืื .""",
    """ื 32 ืืืืงืืืืจ ืืชืคืขืื ืืื ื ืืขืืช ืืืจ ืืขืืชืื " ืืืกืืื ืืืื " ืืืืื ืืืืืช ืืืขืจืืฆื ืืช 21 : " ืืื ืขืฉื ืืืืืฉืื ืืืืื ืืืขื ืฆืืืช ืืืืืืจ ืื ืฉืืงื ืืืืจื ืฉื ืื ืืื ืืขืฉืืช ืืืขื ืืืืืืืื ืฆืืขืื ืืช ... ืื ืืืืืจ ืืื ืกืคืืจื ืืืืืืคื , ืืื ืืื ืืืื ืืืืืืืช ืืืื ... ืกืืืืจ ืื ืืื , ืขื ืฉืืื ืืืจื ืืื ืืืืช ืืืืฉืืข ืืื ืฆืจืคืชืืช ... ืื ืืืืืจ , ืชืืื ืืืืื ื ืืืืืื ืืช ืืืืื ืืื ื ืืืืืช ืกืืืื ืืื ืืืืชืชื ืืขื ืงืืช , ืื ืงืจืืช ืืกืฆืืกืืก " .""",
    """ืื ืืื ื ืืื ืืงืฆืชื ! ืืจื ืฉื ืกืืคืจืชื ืขื ืืืืขื ืืงืจืื ืืช ืืื ืง , ืขื ืืืชืื ืฉืืืืืื ืืืงืจืื ืืช , ืขื " ืงืืื ืื ืงื ืื " , ืขื ืื ืฉื ืืก"ืก ืืืืืืื ืืืืชื ืืื , ืขื ืืืืืืช ืืืืจ ืื ืงืจืืช ืืขืื ืืื ืฉืฉืืื ืืืจืงืืจืื , ืขื ืืื ืืืืื ืฉื ืงืจืขื ืืืจืืขืืช ืืืืชืืื , ืืืขืชืื ื ืฉืืจื ืืืืืืช ืืฆืขืืจืืช ืืืืื , ืืืชื ืืืชืืจ ืจืง ืืืื ืืืืืืช ืขื ื"ืกืืงืฆืื " .""",
    """ืฉืืืื ืืืฉืฃ ืืช ืชืืืืจื ืืืืฆืจืื ืืืฆืืืื ืฉื ืืืืจื: " ืืืืืจืื ื ืืชืจื ืืืื : 70 ืืืืจ ืืืืืืช ืขืกืง ืงืื , 300 ืืืืจ ืืืืืืช ืจืฉืช ืืขืกืง ืงืื , ืืื 1,500 ื - 3,500 ืืืืจ ืืืืืืช ืืืจืืช ืืืืืืช ืขื ืืชืจ ืจืืฉื ืืขื 500 ืืืฉืืื , ืืืืฆืขืืช ืืืฆืจื ืืฆ'ืง ืคืืื ื ืืงืกืคืจืก , ืืืื 15,000 ื - 20,000 ืืืืจ ืืขืกืง ืขื 3 ืขื 4 ืืชืจืื , ืืืจืืช ืืืืืืช ืขื ืืืืืจื ืืืืจืืช ืืฉืืขืืชืืื ."""
]
selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own sentence!")
text_to_analyze = custom_input if custom_input else selected_text

st.subheader('Full example text')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
# Initialize Spark and create pipeline
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, text_to_analyze)
# Display the annotated output
st.subheader("Processed output:")
results = {
    'Document': output[0]['document'][0].result,
    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
    'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
}
annotate(results)

with st.expander("View DataFrame"):
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)