Upload 10 files
- .streamlit/config.toml +3 -0
- Demo.py +155 -0
- Dockerfile +70 -0
- inputs/hebrewner_cc_300d/Example1.txt +2 -0
- inputs/hebrewner_cc_300d/Example2.txt +2 -0
- inputs/hebrewner_cc_300d/Example3.txt +2 -0
- inputs/hebrewner_cc_300d/Example4.txt +2 -0
- inputs/hebrewner_cc_300d/Example5.txt +2 -0
- pages/Workflow & Model Overview.py +270 -0
- requirements.txt +6 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
+[theme]
+base="light"
+primaryColor="#29B4E8"
Demo.py
ADDED
@@ -0,0 +1,155 @@
+import streamlit as st
+import sparknlp
+import os
+import pandas as pd
+
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from sparknlp.pretrained import PretrainedPipeline
+from annotated_text import annotated_text
+
+# Page configuration
+st.set_page_config(
+    layout="wide",
+    initial_sidebar_state="auto"
+)
+
+# CSS for styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 10px;
+            border-radius: 10px;
+            margin-top: 10px;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+@st.cache_resource
+def init_spark():
+    return sparknlp.start()
+
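+# Build the Spark NLP pipeline: document assembly, sentence detection,
+# tokenization, word-embedding lookup, NER tagging, and conversion of the
+# IOB tags into entity chunks.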
+@st.cache_resource
+def create_pipeline(model):
+    documentAssembler = DocumentAssembler() \
+        .setInputCol("text") \
+        .setOutputCol("document")
+
+    sentence_detector = SentenceDetector() \
+        .setInputCols(["document"]) \
+        .setOutputCol("sentence")
+
+    tokenizer = Tokenizer() \
+        .setInputCols(["sentence"]) \
+        .setOutputCol("token")
+
+    word_embeddings = WordEmbeddingsModel.pretrained("hebrew_cc_300d", "he") \
+        .setInputCols(["sentence", "token"]) \
+        .setOutputCol("embeddings")
+
+    ner = NerDLModel.pretrained("hebrewner_cc_300d", "he") \
+        .setInputCols(["sentence", "token", "embeddings"]) \
+        .setOutputCol("ner")
+
+    ner_converter = NerConverter().setInputCols(["sentence", "token", "ner"]).setOutputCol("ner_chunk")
+
+    pipeline = Pipeline(stages=[documentAssembler, sentence_detector, tokenizer, word_embeddings, ner, ner_converter])
+    return pipeline
+
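+# Fit the pipeline on an empty DataFrame to materialize the model, then wrap
+# it in a LightPipeline for fast in-memory annotation of a single string.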
+def fit_data(pipeline, data):
+    empty_df = spark.createDataFrame([['']]).toDF('text')
+    pipeline_model = pipeline.fit(empty_df)
+    model = LightPipeline(pipeline_model)
+    result = model.fullAnnotate(data)
+    return result
+
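+# Split the document around each detected chunk, interleaving plain text with
+# (chunk, label) tuples so annotated_text renders entities as highlighted spans.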
+def annotate(data):
+    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+    annotated_words = []
+    for chunk, label in zip(chunks, labels):
+        parts = document.split(chunk, 1)
+        if parts[0]:
+            annotated_words.append(parts[0])
+        annotated_words.append((chunk, label))
+        document = parts[1]
+    if document:
+        annotated_words.append(document)
+    annotated_text(*annotated_words)
+
+# Set up the page layout
+st.markdown('<div class="main-title">Recognize entities in Hebrew text</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>Named Entity Recognition (NER) models identify and categorize important entities in a text. This page details a word embeddings-based NER model for Hebrew texts, using the <code>hebrew_cc_300d</code> word embeddings. The model is pretrained and available for use with Spark NLP.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Sidebar content
+model = st.sidebar.selectbox(
+    "Choose the pretrained model",
+    ["hebrewner_cc_300d"],
+    help="For more info about the models visit: https://sparknlp.org/models"
+)
+
+# Reference notebook link in sidebar
+link = """
+<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/public/NER_HE.ipynb">
+    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+</a>
+"""
+st.sidebar.markdown('Reference notebook:')
+st.sidebar.markdown(link, unsafe_allow_html=True)
+
+# Load examples
+examples = [
+    """ืืืชืืฆืื : ืกืคืจื ืืคื ืืจื ืืืจ ืขื ืง ืืืกืืก ืืืืืืืืื ืชืืืืืืืืื ืืืืื ืื ื ืืขืืื , ืืื ืื ืืชืงืคืืช ืืืืฉืืืช ืืืคื ืืจืืื ืืืืื ืืื ืกืืื ืืคื ืฉืืขืืื ืื ืืชืขืืจืจื ืืชืืฆืื ืืกืคืจืืื ืฉื ืืืืืืก ืื ืืืืืื , ืืืฃ ืืจื ืืกืืคืจ ืืฆืืื ืืืืืช ืขืฆืื , ืื ืืืจืกืืื , ืืขืจืื ืืช ืืกืคืจ " ืืกืืืืช ืฉืืืืืจื ืฆืืคื ืื ืืื ืฆ'ื " , ืฉืื ืืื ืืืืง ืืืช ืืืืช ืืช ืืขืืืืืช ืืืื ืืืช ืฉืขืืืื ืืกืชืื ืืจืืื ืขื ืืื ืฉืคืข ืฉื ืืืืจืื , ืืืงื ืืงืืจืืื ืืืืงื ืืงืืืื ืืกืคืจืื , ืืชืื ืขืช ืืจืืืื ืืช ืขื ืืืงืจืื ืฉืื ืื .""",
+    """ืืืื ืงืืฆืจ ืืืจืืขื ืื ื ืชืขืกืง ืืื ืืื ืื ืืฉืืื ืืืืืื ืื ืฉืืื ืื ืืกืคืจ , ืืื ืืืฉื ืืืืชื ืฉื ืืจืื ืืืืืืืช , ืืืขืืช ืืืืืืจืืืช ืฉื ืืืืื ืจืื ืื ืืื ืฆื ืืื ืืืื , ืืื ื ืชืืงื ืื ืืฉื ืืื - ืืืืืืช ืืกืชืจ " ืืกืืจ ืฆืืื " - ืืกืืจ ืืฉืื ืืงืืื ืืืืืื ืืื ืืืฃ ืฉื ื , ืืชืคืงืืื ืืืื ืขื ืฆืืฆืื ืืฉืืฉืืช ืื ึถืจืื ึผืื ืืืช ืืงืืืื ืฉื ืฆืจืคืช , ืฉืื ืืืขืฉื ืฆืืฆืื ืืฉืืข ืืืจืื ืืืืืืืช , ืืืคืืื ืื , ืืืขืช ืืืจื ืืืกืืจ , ืืฉืืฉืืช ืืืืืืชืืช ืืืืืืืืืช ืฉื ืฆืจืคืช , ืื ืฉืืืืจ ืืืืื ืฉืืืื ืฆืจืคืช ืื ืืืืฆื ืืืืื .""",
+    """ื 32 ืืืืงืืืืจ ืืชืคืขืื ืืื ื ืืขืืช ืืืจ ืืขืืชืื " ืืืกืืื ืืืื " ืืืืื ืืืืืช ืืืขืจืืฆื ืืช 21 : " ืืื ืขืฉื ืืืืืฉืื ืืืืื ืืืขื ืฆืืืช ืืืืืืจ ืื ืฉืืงื ืืืืจื ืฉื ืื ืืื ืืขืฉืืช ืืืขื ืืืืืืืื ืฆืืขืื ืืช ... ืื ืืืืืจ ืืื ืกืคืืจื ืืืืืืคื , ืืื ืืื ืืืื ืืืืืืืช ืืืื ... ืกืืืืจ ืื ืืื , ืขื ืฉืืื ืืืจื ืืื ืืืืช ืืืืฉืืข ืืื ืฆืจืคืชืืช ... ืื ืืืืืจ , ืชืืื ืืืืื ื ืืืืืื ืืช ืืืืื ืืื ื ืืืืืช ืกืืืื ืืื ืืืืชืชื ืืขื ืงืืช , ืื ืงืจืืช ืืกืฆืืกืืก " .""",
+    """ืื ืืื ื ืืื ืืงืฆืชื ! ืืจื ืฉื ืกืืคืจืชื ืขื ืืืืขื ืืงืจืื ืืช ืืื ืง , ืขื ืืืชืื ืฉืืืืืื ืืืงืจืื ืืช , ืขื " ืงืืื ืื ืงื ืื " , ืขื ืื ืฉื ืืก"ืก ืืืืืืื ืืืืชื ืืื , ืขื ืืืืืืช ืืืืจ ืื ืงืจืืช ืืขืื ืืื ืฉืฉืืื ืืืจืงืืจืื , ืขื ืืื ืืืืื ืฉื ืงืจืขื ืืืจืืขืืช ืืืืชืืื , ืืืขืชืื ื ืฉืืจื ืืืืืืช ืืฆืขืืจืืช ืืืืื , ืืืชื ืืืชืืจ ืจืง ืืืื ืืืืืืช ืขื ื"ืกืืงืฆืื " .""",
+    """ืฉืืืื ืืืฉืฃ ืืช ืชืืืืจื ืืืืฆืจืื ืืืฆืืืื ืฉื ืืืืจื: " ืืืืืจืื ื ืืชืจื ืืืื : 70 ืืืืจ ืืืืืืช ืขืกืง ืงืื , 300 ืืืืจ ืืืืืืช ืจืฉืช ืืขืกืง ืงืื , ืืื 1,500 ื - 3,500 ืืืืจ ืืืืืืช ืืืจืืช ืืืืืืช ืขื ืืชืจ ืจืืฉื ืืขื 500 ืืืฉืืื , ืืืืฆืขืืช ืืืฆืจื ืืฆ'ืง ืคืืื ื ืืงืกืคืจืก , ืืืื 15,000 ื - 20,000 ืืืืจ ืืขืกืง ืขื 3 ืขื 4 ืืชืจืื , ืืืจืืช ืืืืืืช ืขื ืืืืืจื ืืืืจืืช ืืฉืืขืืชืืื ."""
+]
+
+selected_text = st.selectbox("Select an example", examples)
+custom_input = st.text_input("Try it with your own sentence!")
+
+text_to_analyze = custom_input if custom_input else selected_text
+
+st.subheader('Full example text')
+HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+# Initialize Spark and create pipeline
+spark = init_spark()
+pipeline = create_pipeline(model)
+output = fit_data(pipeline, text_to_analyze)
+
+# Display matched sentence
+st.subheader("Processed output:")
+
+results = {
+    'Document': output[0]['document'][0].result,
+    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
+    "NER Label": [n.metadata['entity'] for n in output[0]['ner_chunk']]
+}
+
+annotate(results)
+
+with st.expander("View DataFrame"):
+    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+    df.index += 1
+    st.dataframe(df)
+
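With the dependencies from requirements.txt installed, the demo can be run locally (a sketch; the port flag mirrors the Dockerfile entry point and is optional):

  streamlit run Demo.py --server.port 7860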
Dockerfile
ADDED
@@ -0,0 +1,70 @@
+# Download base image ubuntu 18.04
+FROM ubuntu:18.04
+
+# Set environment variables
+ENV NB_USER jovyan
+ENV NB_UID 1000
+ENV HOME /home/${NB_USER}
+
+# Install required packages
+RUN apt-get update && apt-get install -y \
+    tar \
+    wget \
+    bash \
+    rsync \
+    gcc \
+    libfreetype6-dev \
+    libhdf5-serial-dev \
+    libpng-dev \
+    libzmq3-dev \
+    python3 \
+    python3-dev \
+    python3-pip \
+    unzip \
+    pkg-config \
+    software-properties-common \
+    graphviz \
+    openjdk-8-jdk \
+    ant \
+    ca-certificates-java \
+    && apt-get clean \
+    && update-ca-certificates -f;
+
+# Install Python 3.8 and pip
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
+    && apt-get install -y python3.8 python3-pip \
+    && apt-get clean;
+
+# Set up JAVA_HOME
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+RUN mkdir -p ${HOME} \
+    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
+    && chown -R ${NB_UID}:${NB_UID} ${HOME}
+
+# Create a new user named "jovyan" with user ID 1000
+RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+# Switch to the "jovyan" user
+USER ${NB_USER}
+
+# Set home and path variables for the user
+ENV HOME=/home/${NB_USER} \
+    PATH=/home/${NB_USER}/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR ${HOME}
+
+# Upgrade pip and install Python dependencies
+RUN python3.8 -m pip install --upgrade pip
+COPY requirements.txt /tmp/requirements.txt
+RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+# Copy the application code into the container at /home/jovyan
+COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+# Expose port for Streamlit
+EXPOSE 7860
+
+# Define the entry point for the container
+ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
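To verify the container locally, the image can be built and run as follows (a sketch; the "hebrew-ner-demo" tag is an arbitrary placeholder):

  docker build -t hebrew-ner-demo .
  docker run -p 7860:7860 hebrew-ner-demo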
inputs/hebrewner_cc_300d/Example1.txt
ADDED
@@ -0,0 +1,2 @@
+Example 1
+ืืืชืืฆืื : ืกืคืจื ืืคื ืืจื ืืืจ ืขื ืง ืืืกืืก ืืืืืืืืื ืชืืืืืืืืื ืืืืื ืื ื ืืขืืื , ืืื ืื ืืชืงืคืืช ืืืืฉืืืช ืืืคื ืืจืืื ืืืืื ืืื ืกืืื ืืคื ืฉืืขืืื ืื ืืชืขืืจืจื ืืชืืฆืื ืืกืคืจืืื ืฉื ืืืืืืก ืื ืืืืืื , ืืืฃ ืืจื ืืกืืคืจ ืืฆืืื ืืืืืช ืขืฆืื , ืื ืืืจืกืืื , ืืขืจืื ืืช ืืกืคืจ " ืืกืืืืช ืฉืืืืืจื ืฆืืคื ืื ืืื ืฆ'ื " , ืฉืื ืืื ืืืืง ืืืช ืืืืช ืืช ืืขืืืืืช ืืืื ืืืช ืฉืขืืืื ืืกืชืื ืืจืืื ืขื ืืื ืฉืคืข ืฉื ืืืืจืื , ืืืงื ืืงืืจืืื ืืืืงื ืืงืืืื ืืกืคืจืื , ืืชืื ืขืช ืืจืืืื ืืช ืขื ืืืงืจืื ืฉืื ืื .
inputs/hebrewner_cc_300d/Example2.txt
ADDED
@@ -0,0 +1,2 @@
+Example 2
+ืืืื ืงืืฆืจ ืืืจืืขื ืื ื ืชืขืกืง ืืื ืืื ืื ืืฉืืื ืืืืืื ืื ืฉืืื ืื ืืกืคืจ , ืืื ืืืฉื ืืืืชื ืฉื ืืจืื ืืืืืืืช , ืืืขืืช ืืืืืืจืืืช ืฉื ืืืืื ืจืื ืื ืืื ืฆื ืืื ืืืื , ืืื ื ืชืืงื ืื ืืฉื ืืื - ืืืืืืช ืืกืชืจ " ืืกืืจ ืฆืืื " - ืืกืืจ ืืฉืื ืืงืืื ืืืืืื ืืื ืืืฃ ืฉื ื , ืืชืคืงืืื ืืืื ืขื ืฆืืฆืื ืืฉืืฉืืช ืื ึถืจืื ึผืื ืืืช ืืงืืืื ืฉื ืฆืจืคืช , ืฉืื ืืืขืฉื ืฆืืฆืื ืืฉืืข ืืืจืื ืืืืืืืช , ืืืคืืื ืื , ืืืขืช ืืืจื ืืืกืืจ , ืืฉืืฉืืช ืืืืืืชืืช ืืืืืืืืืช ืฉื ืฆืจืคืช , ืื ืฉืืืืจ ืืืืื ืฉืืืื ืฆืจืคืช ืื ืืืืฆื ืืืืื .
inputs/hebrewner_cc_300d/Example3.txt
ADDED
@@ -0,0 +1,2 @@
+Example 3
+ื 32 ืืืืงืืืืจ ืืชืคืขืื ืืื ื ืืขืืช ืืืจ ืืขืืชืื " ืืืกืืื ืืืื " ืืืืื ืืืืืช ืืืขืจืืฆื ืืช 21 : " ืืื ืขืฉื ืืืืืฉืื ืืืืื ืืืขื ืฆืืืช ืืืืืืจ ืื ืฉืืงื ืืืืจื ืฉื ืื ืืื ืืขืฉืืช ืืืขื ืืืืืืืื ืฆืืขืื ืืช ... ืื ืืืืืจ ืืื ืกืคืืจื ืืืืืืคื , ืืื ืืื ืืืื ืืืืืืืช ืืืื ... ืกืืืืจ ืื ืืื , ืขื ืฉืืื ืืืจื ืืื ืืืืช ืืืืฉืืข ืืื ืฆืจืคืชืืช ... ืื ืืืืืจ , ืชืืื ืืืืื ื ืืืืืื ืืช ืืืืื ืืื ื ืืืืืช ืกืืืื ืืื ืืืืชืชื ืืขื ืงืืช , ืื ืงืจืืช ืืกืฆืืกืืก " .
inputs/hebrewner_cc_300d/Example4.txt
ADDED
@@ -0,0 +1,2 @@
+Example 4
+ืื ืืื ื ืืื ืืงืฆืชื ! ืืจื ืฉื ืกืืคืจืชื ืขื ืืืืขื ืืงืจืื ืืช ืืื ืง , ืขื ืืืชืื ืฉืืืืืื ืืืงืจืื ืืช , ืขื " ืงืืื ืื ืงื ืื " , ืขื ืื ืฉื ืืก"ืก ืืืืืืื ืืืืชื ืืื , ืขื ืืืืืืช ืืืืจ ืื ืงืจืืช ืืขืื ืืื ืฉืฉืืื ืืืจืงืืจืื , ืขื ืืื ืืืืื ืฉื ืงืจืขื ืืืจืืขืืช ืืืืชืืื , ืืืขืชืื ื ืฉืืจื ืืืืืืช ืืฆืขืืจืืช ืืืืื , ืืืชื ืืืชืืจ ืจืง ืืืื ืืืืืืช ืขื ื"ืกืืงืฆืื " .
inputs/hebrewner_cc_300d/Example5.txt
ADDED
@@ -0,0 +1,2 @@
+Example 5
+ืฉืืืื ืืืฉืฃ ืืช ืชืืืืจื ืืืืฆืจืื ืืืฆืืืื ืฉื ืืืืจื: " ืืืืืจืื ื ืืชืจื ืืืื : 70 ืืืืจ ืืืืืืช ืขืกืง ืงืื , 300 ืืืืจ ืืืืืืช ืจืฉืช ืืขืกืง ืงืื , ืืื 1,500 ื - 3,500 ืืืืจ ืืืืืืช ืืืจืืช ืืืืืืช ืขื ืืชืจ ืจืืฉื ืืขื 500 ืืืฉืืื , ืืืืฆืขืืช ืืืฆืจื ืืฆ'ืง ืคืืื ื ืืงืกืคืจืก , ืืืื 15,000 ื - 20,000 ืืืืจ ืืขืกืง ืขื 3 ืขื 4 ืืชืจืื , ืืืจืืช ืืืืืืช ืขื ืืืืืจื ืืืืจืืช ืืฉืืขืืชืืื .
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,270 @@
+import streamlit as st
+import pandas as pd
+
+# Custom CSS for better styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .sub-title {
+            font-size: 24px;
+            color: #4A90E2;
+            margin-top: 20px;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 15px;
+            border-radius: 10px;
+            margin-top: 20px;
+        }
+        .section h2 {
+            font-size: 22px;
+            color: #4A90E2;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+        .link {
+            color: #4A90E2;
+            text-decoration: none;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+# Main Title
+st.markdown('<div class="main-title">Detect 10 Different Entities in Hebrew (hebrew_cc_300d embeddings)</div>', unsafe_allow_html=True)
+
+# Introduction
+st.markdown("""
+<div class="section">
+  <p>Named Entity Recognition (NER) models identify and categorize important entities in a text. This page details a word embeddings-based NER model for Hebrew texts, using the <code>hebrew_cc_300d</code> word embeddings. The model is pretrained and available for use with Spark NLP.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Model Description
+st.markdown('<div class="sub-title">Description</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>This model uses Hebrew word embeddings to find 10 different types of entities in Hebrew text. It is trained using <code>hebrew_cc_300d</code> word embeddings, so please use the same embeddings in the pipeline. It can identify the following types of entities:</p>
+  <ul>
+    <li>PERS (Persons)</li>
+    <li>DATE (Dates)</li>
+    <li>ORG (Organizations)</li>
+    <li>LOC (Locations)</li>
+    <li>PERCENT (Percentage)</li>
+    <li>MONEY (Money)</li>
+    <li>TIME (Time)</li>
+    <li>MISC_AFF (Miscellaneous Affiliation)</li>
+    <li>MISC_EVENT (Miscellaneous Event)</li>
+    <li>MISC_ENT (Miscellaneous Entity)</li>
+  </ul>
+</div>
+""", unsafe_allow_html=True)
+
+# Setup Instructions
+st.markdown('<div class="sub-title">Setup</div>', unsafe_allow_html=True)
+st.markdown('<p>To use the model, you need Spark NLP installed. You can install it using pip:</p>', unsafe_allow_html=True)
+st.code("""
+pip install spark-nlp
+pip install pyspark
+""", language="bash")
+
+st.markdown("<p>Then, import Spark NLP and start a Spark session:</p>", unsafe_allow_html=True)
+st.code("""
+import sparknlp
+
+# Start Spark Session
+spark = sparknlp.start()
+""", language='python')
+
+# Example Usage
+st.markdown('<div class="sub-title">Example Usage with the Hebrew NER Model</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>Below is an example of how to set up and use the <code>hebrewner_cc_300d</code> model for named entity recognition in Hebrew:</p>
+</div>
+""", unsafe_allow_html=True)
+st.code('''
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from pyspark.sql.functions import col, expr
+
+# Define the components of the pipeline
+documentAssembler = DocumentAssembler() \\
+    .setInputCol("text") \\
+    .setOutputCol("document")
+
+sentence_detector = SentenceDetector() \\
+    .setInputCols(["document"]) \\
+    .setOutputCol("sentence")
+
+tokenizer = Tokenizer() \\
+    .setInputCols(["sentence"]) \\
+    .setOutputCol("token")
+
+word_embeddings = WordEmbeddingsModel.pretrained("hebrew_cc_300d", "he") \\
+    .setInputCols(["sentence", "token"]) \\
+    .setOutputCol("embeddings")
+
+ner = NerDLModel.pretrained("hebrewner_cc_300d", "he") \\
+    .setInputCols(["sentence", "token", "embeddings"]) \\
+    .setOutputCol("ner")
+
+ner_converter = NerConverter().setInputCols(["sentence", "token", "ner"]).setOutputCol("ner_chunk")
+
+# Create the pipeline
+pipeline = Pipeline(stages=[documentAssembler, sentence_detector, tokenizer, word_embeddings, ner, ner_converter])
+
+# Create sample data
+example = """
+ื- 25 ืืืืืืกื ืขืฆืจ ืืฉื"ื ืืช ืืืืื ืืื-ื'ืืืื , ืืืจื ืืจืื ื , ืฉืืืืก ืืืจืืื ืืคืช"ื ืืืืคืขื ืขื ืืื ืืืืืืืื. ืืื-ื'ืืืื ืืชืืืื ืืืงืื ืืืืืืช ืืจืืจ ืืืื ืืืงืจื ืขืจืืื ืืฉืจืื , ืืืฆืข ืคืืืืข ืืจืืืช ืืฉืจืื ืื ืืจืื , ืืคืืืข ืืืืจืืช ืืฉืจืืืืืช ืืืจืื ืืืืืืฃ ืืืืืื ืืื ืืฉืืจืจ ืืกืืจืื ืืืืืื ืืื.
+"""
+data = spark.createDataFrame([[example]]).toDF("text")
+
+# Fit and transform data with the pipeline
+result = pipeline.fit(data).transform(data)
+
+# Select the result, entity
+result.select(
+    expr("explode(ner_chunk) as ner_chunk")
+).select(
+    col("ner_chunk.result").alias("chunk"),
+    col("ner_chunk.metadata").getItem("entity").alias("ner_label")
+).show(truncate=False)
+''', language="python")
+
+# Expected output of the example above, shown as a static DataFrame
+data = {
+    "chunk": [
+        "25 ืืืืืืกื",
+        "ืืฉื\"ื",
+        "ืืืืื ืืื-ื'ืืืื",
+        "ืืจืื ื",
+        "ืืคืช\"ื",
+        "ืืืืืืืื",
+        "ืืื-ื'ืืืื",
+        "ืืืื",
+        "ืขืจืืื",
+        "ืืฉืจืื",
+        "ืืจืืืช ืืฉืจืื",
+        "ืื ืืจืื",
+        "ืืฉืจืืืืืช",
+        "ืืืจืื"
+    ],
+    "ner_label": [
+        "DATE",
+        "ORG",
+        "PERS",
+        "MISC_AFF",
+        "ORG",
+        "ORG",
+        "PERS",
+        "LOC",
+        "MISC_AFF",
+        "LOC",
+        "ORG",
+        "LOC",
+        "MISC_AFF",
+        "LOC"
+    ]
+}
+
+# Creating the DataFrame
+df = pd.DataFrame(data)
+df.index += 1
+st.dataframe(df)
+
+# Model Information
+st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>The <code>hebrewner_cc_300d</code> model details are as follows:</p>
+  <ul>
+    <li><strong>Model Name:</strong> hebrewner_cc_300d</li>
+    <li><strong>Type:</strong> ner</li>
+    <li><strong>Compatibility:</strong> Spark NLP 4.0.2+</li>
+    <li><strong>License:</strong> Open Source</li>
+    <li><strong>Edition:</strong> Official</li>
+    <li><strong>Input Labels:</strong> [document, token, word_embeddings]</li>
+    <li><strong>Output Labels:</strong> [ner]</li>
+    <li><strong>Language:</strong> he</li>
+    <li><strong>Size:</strong> 14.8 MB</li>
+  </ul>
+</div>
+""", unsafe_allow_html=True)
+
+# Benchmark Section
+st.markdown('<div class="sub-title">Benchmark</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>Evaluating the performance of NER models is crucial to understanding their effectiveness in real-world applications. Below are the benchmark results for the <code>hebrewner_cc_300d</code> model, focusing on various named entity categories. The metrics used include precision, recall, and F1-score, which are standard for evaluating classification models.</p>
+</div>
+""", unsafe_allow_html=True)
+st.markdown("""
+---
+| Label         | TP   | FP  | FN  | Precision | Recall   | F1-Score |
+|---------------|------|-----|-----|-----------|----------|----------|
+| I-TIME        | 5    | 2   | 0   | 0.714286  | 1.000000 | 0.833333 |
+| I-MISC_AFF    | 2    | 0   | 3   | 1.000000  | 0.400000 | 0.571429 |
+| B-MISC_EVENT  | 7    | 0   | 1   | 1.000000  | 0.875000 | 0.933333 |
+| B-LOC         | 180  | 24  | 37  | 0.882353  | 0.829493 | 0.855107 |
+| I-ORG         | 124  | 47  | 38  | 0.725146  | 0.765432 | 0.744745 |
+| B-DATE        | 50   | 4   | 7   | 0.925926  | 0.877193 | 0.900901 |
+| I-PERS        | 157  | 10  | 15  | 0.940120  | 0.912791 | 0.926254 |
+| I-DATE        | 39   | 7   | 8   | 0.847826  | 0.829787 | 0.838710 |
+| B-MISC_AFF    | 132  | 11  | 9   | 0.923077  | 0.936170 | 0.929577 |
+| I-MISC_EVENT  | 6    | 0   | 2   | 1.000000  | 0.750000 | 0.857143 |
+| B-TIME        | 4    | 0   | 1   | 1.000000  | 0.800000 | 0.888889 |
+| I-PERCENT     | 8    | 0   | 0   | 1.000000  | 1.000000 | 1.000000 |
+| I-MISC_ENT    | 11   | 3   | 10  | 0.785714  | 0.523810 | 0.628571 |
+| B-MISC_ENT    | 8    | 1   | 5   | 0.888889  | 0.615385 | 0.727273 |
+| I-LOC         | 79   | 18  | 23  | 0.814433  | 0.774510 | 0.793970 |
+| B-PERS        | 231  | 22  | 26  | 0.913044  | 0.898833 | 0.905882 |
+| B-MONEY       | 36   | 2   | 2   | 0.947368  | 0.947368 | 0.947368 |
+| B-PERCENT     | 28   | 3   | 0   | 0.903226  | 1.000000 | 0.949152 |
+| B-ORG         | 166  | 41  | 37  | 0.801932  | 0.817734 | 0.809756 |
+| I-MONEY       | 61   | 1   | 1   | 0.983871  | 0.983871 | 0.983871 |
+| Macro-average | 1334 | 196 | 225 | 0.899861  | 0.826869 | 0.861822 |
+| Micro-average | 1334 | 196 | 225 | 0.871895  | 0.855677 | 0.863710 |
+""", unsafe_allow_html=True)
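+
+# Sanity check of the table arithmetic, e.g. for the B-LOC row:
+# precision = 180 / (180 + 24) ≈ 0.882353, recall = 180 / (180 + 37) ≈ 0.829493,
+# F1 = 2 * P * R / (P + R) ≈ 0.855107, matching the values reported above.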
+
+# Summary
+st.markdown('<div class="sub-title">Summary</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <p>This page provided an overview of the <code>hebrewner_cc_300d</code> model for Hebrew NER. We discussed how to set up and use the model with Spark NLP, including example code and results. We also provided details on the model's specifications and links to relevant resources for further exploration.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# References
+st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <ul>
+    <li><a class="link" href="https://sparknlp.org/api/python/reference/autosummary/sparknlp/annotator/ner/ner_dl/index.html" target="_blank" rel="noopener">NerDLModel</a> annotator documentation</li>
+    <li>Model used: <a class="link" href="https://sparknlp.org/2022/08/09/hebrewner_cc_300d_he_3_0.html" rel="noopener">hebrewner_cc_300d_he_3_0</a></li>
+    <li><a class="link" href="https://www.cs.bgu.ac.il/~elhadad/nlpproj/naama/" target="_blank" rel="noopener">Data source</a></li>
+    <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
+    <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
+  </ul>
+</div>
+""", unsafe_allow_html=True)
+
+# Community & Support
+st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+  <ul>
+    <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
+    <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub Repository</a>: Report issues or contribute</li>
+    <li><a class="link" href="https://forum.johnsnowlabs.com/" target="_blank">Community Forum</a>: Ask questions, share ideas, and get support</li>
+  </ul>
+</div>
+""", unsafe_allow_html=True)
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+streamlit
+st-annotated-text
+pandas
+numpy
+spark-nlp
+pyspark
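Outside Docker, the same dependencies can be installed directly with:

  python3 -m pip install -r requirements.txt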