abdullahmubeen10 committed
Commit: 2161bb1
Parent(s): fe37b17

Upload 6 files

Files changed:
- .streamlit/config.toml +3 -0
- Demo.py +158 -0
- Dockerfile +70 -0
- images/ner.png +0 -0
- pages/Workflow & Model Overview.py +355 -0
- requirements.txt +6 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
+[theme]
+base="light"
+primaryColor="#29B4E8"
Demo.py
ADDED
@@ -0,0 +1,158 @@
+import streamlit as st
+import sparknlp
+import pandas as pd
+
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from annotated_text import annotated_text
+
+# Page configuration
+st.set_page_config(
+    layout="wide",
+    page_title="Spark NLP Demos App",
+    initial_sidebar_state="auto"
+)
+
+# CSS for styling
+st.markdown("""
+<style>
+    .main-title {
+        font-size: 36px;
+        color: #4A90E2;
+        font-weight: bold;
+        text-align: center;
+    }
+    .section p, .section ul {
+        color: #666666;
+    }
+</style>
+""", unsafe_allow_html=True)
+
+@st.cache_resource
+def init_spark():
+    return sparknlp.start()
+
+@st.cache_resource
+def create_pipeline(model):
+    documentAssembler = DocumentAssembler() \
+        .setInputCol('text') \
+        .setOutputCol('document')
+
+    tokenizer = Tokenizer() \
+        .setInputCols(['document']) \
+        .setOutputCol('token')
+
+    # Choose embeddings that match the selected NER model
+    if model == "ner_dl":
+        embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
+            .setInputCols(['document', 'token']) \
+            .setOutputCol('embeddings')
+    elif model == "ner_dl_bert":
+        embeddings = BertEmbeddings.pretrained('bert_base_cased', 'en') \
+            .setInputCols(['document', 'token']) \
+            .setOutputCol('embeddings')
+
+    ner_model = NerDLModel.pretrained(model, 'en') \
+        .setInputCols(['document', 'token', 'embeddings']) \
+        .setOutputCol('ner')
+
+    ner_converter = NerConverter() \
+        .setInputCols(['document', 'token', 'ner']) \
+        .setOutputCol('ner_chunk')
+
+    nlp_pipeline = Pipeline(
+        stages=[
+            documentAssembler,
+            tokenizer,
+            embeddings,
+            ner_model,
+            ner_converter])
+
+    return nlp_pipeline
+
+def fit_data(pipeline, data):
+    # Fit on an empty DataFrame (pretrained stages just load), then wrap the
+    # model in a LightPipeline for fast single-string inference
+    empty_df = spark.createDataFrame([['']]).toDF('text')
+    pipeline_model = pipeline.fit(empty_df)
+    model = LightPipeline(pipeline_model)
+    result = model.fullAnnotate(data)
+    return result
+
+def annotate(data):
+    # Interleave plain text and (chunk, label) tuples for st-annotated-text
+    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+    annotated_words = []
+    for chunk, label in zip(chunks, labels):
+        parts = document.split(chunk, 1)
+        if parts[0]:
+            annotated_words.append(parts[0])
+        annotated_words.append((chunk, label))
+        document = parts[1]
+    if document:
+        annotated_words.append(document)
+    annotated_text(*annotated_words)
+
+# Set up the page layout
+st.markdown('<div class="main-title">State-of-the-Art Named Entity Recognition with Spark NLP</div>', unsafe_allow_html=True)
+
+# Sidebar content
+model = st.sidebar.selectbox(
+    "Choose the pretrained model",
+    ["ner_dl", "ner_dl_bert"],
+    help="For more info about the models visit: https://sparknlp.org/models"
+)
+
+# Reference notebook link in sidebar
+link = """
+<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_EN.ipynb">
+    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+</a>
+"""
+st.sidebar.markdown('Reference notebook:')
+st.sidebar.markdown(link, unsafe_allow_html=True)
+
+# Example texts
+examples = [
+    "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000. He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
+    "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
+    "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
+    "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
+    "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence.",
+    "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he has divided his time between Google and the University of Toronto. In 2017, he co-founded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
+    "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
+    "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
+    "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
+    "Other than being the king of the north, John Snow is an English physician and a leader in the development of anaesthesia and medical hygiene. He is considered the first to use data to fight a cholera outbreak, in 1854."
+]
+
+selected_text = st.selectbox("Select an example", examples)
+custom_input = st.text_input("Try it with your own sentence!")
+
+text_to_analyze = custom_input if custom_input else selected_text
+
+st.subheader('Full example text')
+HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+# Initialize Spark and create the pipeline
+spark = init_spark()
+pipeline = create_pipeline(model)
+output = fit_data(pipeline, text_to_analyze)
+
+# Display the processed output
+st.subheader("Processed output:")
+
+results = {
+    'Document': output[0]['document'][0].result,
+    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
+    'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
+}
+
+annotate(results)
+
+with st.expander("View DataFrame"):
+    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+    df.index += 1
+    st.dataframe(df)
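The fit-once / LightPipeline pattern that Demo.py relies on can be sanity-checked outside Streamlit. A minimal sketch wiring up the ner_dl model exactly as in create_pipeline above (the example sentence is illustrative, and the pretrained models download on first use):

import sparknlp
from sparknlp.base import DocumentAssembler, LightPipeline
from sparknlp.annotator import Tokenizer, WordEmbeddingsModel, NerDLModel, NerConverter
from pyspark.ml import Pipeline

spark = sparknlp.start()

documentAssembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
embeddings = WordEmbeddingsModel.pretrained("glove_100d") \
    .setInputCols(["document", "token"]).setOutputCol("embeddings")
ner = NerDLModel.pretrained("ner_dl", "en") \
    .setInputCols(["document", "token", "embeddings"]).setOutputCol("ner")
converter = NerConverter() \
    .setInputCols(["document", "token", "ner"]).setOutputCol("ner_chunk")

pipeline = Pipeline(stages=[documentAssembler, tokenizer, embeddings, ner, converter])

# Fitting on an empty DataFrame only loads the pretrained stages; LightPipeline
# then gives fast, local, single-string inference
empty_df = spark.createDataFrame([[""]]).toDF("text")
light = LightPipeline(pipeline.fit(empty_df))

result = light.fullAnnotate("Steve Jobs co-founded Apple in Cupertino.")[0]
for chunk in result["ner_chunk"]:
    print(chunk.result, chunk.metadata["entity"])

fullAnnotate returns one dictionary per input string, keyed by output column name, which is exactly the shape Demo.py unpacks into its results table.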
Dockerfile
ADDED
@@ -0,0 +1,70 @@
+# Download base image ubuntu 18.04
+FROM ubuntu:18.04
+
+# Set environment variables
+ENV NB_USER jovyan
+ENV NB_UID 1000
+ENV HOME /home/${NB_USER}
+
+# Install required packages
+RUN apt-get update && apt-get install -y \
+    tar \
+    wget \
+    bash \
+    rsync \
+    gcc \
+    libfreetype6-dev \
+    libhdf5-serial-dev \
+    libpng-dev \
+    libzmq3-dev \
+    python3 \
+    python3-dev \
+    python3-pip \
+    unzip \
+    pkg-config \
+    software-properties-common \
+    graphviz \
+    openjdk-8-jdk \
+    ant \
+    ca-certificates-java \
+    && apt-get clean \
+    && update-ca-certificates -f;
+
+# Install Python 3.8 and pip
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
+    && apt-get install -y python3.8 python3-pip \
+    && apt-get clean;
+
+# Set up JAVA_HOME
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+RUN mkdir -p ${HOME} \
+    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
+    && chown -R ${NB_UID}:${NB_UID} ${HOME}
+
+# Create a new user named "jovyan" with user ID 1000
+RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+# Switch to the "jovyan" user
+USER ${NB_USER}
+
+# Set home and path variables for the user
+ENV HOME=/home/${NB_USER} \
+    PATH=/home/${NB_USER}/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR ${HOME}
+
+# Upgrade pip and install Python dependencies
+RUN python3.8 -m pip install --upgrade pip
+COPY requirements.txt /tmp/requirements.txt
+RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+# Copy the application code into the container at /home/jovyan
+COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+# Expose port for Streamlit
+EXPOSE 7860
+
+# Define the entry point for the container
+ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
images/ner.png
ADDED
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,355 @@
+import streamlit as st
+
+# Custom CSS for better styling
+st.markdown("""
+<style>
+    .main-title {
+        font-size: 36px;
+        color: #4A90E2;
+        font-weight: bold;
+        text-align: center;
+    }
+    .sub-title {
+        font-size: 24px;
+        color: #4A90E2;
+        margin-top: 20px;
+    }
+    .section {
+        background-color: #f9f9f9;
+        padding: 15px;
+        border-radius: 10px;
+        margin-top: 20px;
+    }
+    .section h2 {
+        font-size: 22px;
+        color: #4A90E2;
+    }
+    .section p, .section ul {
+        color: #666666;
+    }
+    .link {
+        color: #4A90E2;
+        text-decoration: none;
+    }
+</style>
+""", unsafe_allow_html=True)
+
+# Main Title
+st.markdown('<div class="main-title">The Ultimate Guide to Named Entity Recognition with Spark NLP</div>', unsafe_allow_html=True)
+
+# Introduction
+st.markdown("""
+<div class="section">
+    <p>Named Entity Recognition (NER) is the task of identifying important words in a text and associating them with a category. For example, we may be interested in finding all the personal names in documents, or company names in news articles. Other examples include domain-specific uses such as identifying all disease names in a clinical text, or company trading codes in financial documents.</p>
+    <p>NER can be implemented with many approaches. In this post, we introduce two methods: using a manually crafted list of entities (a gazetteer) or regular expressions, and using deep learning with the NerDL model. Both approaches leverage the scalability of Spark NLP with Python.</p>
+</div>
+""", unsafe_allow_html=True)
+
+st.image("images/ner.png")
+
+# Introduction to Spark NLP
+st.markdown('<div class="sub-title">Introduction to Spark NLP</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>Spark NLP is an open-source library maintained by John Snow Labs. It is built on top of Apache Spark and Spark ML and provides simple, performant, and accurate NLP annotations for machine learning pipelines that can scale easily in a distributed environment.</p>
+    <p>To install Spark NLP, you can use any package manager, such as conda or pip. With pip, run <code>pip install spark-nlp</code>. For other installation options, check the official <a href="https://nlp.johnsnowlabs.com/docs/en/install" target="_blank" class="link">documentation</a>.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Using the NerDL Model
+st.markdown('<div class="sub-title">Using the NerDL Model</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>The NerDL model in Spark NLP is a deep learning-based approach to NER. It uses a Char CNNs - BiLSTM - CRF architecture that achieves state-of-the-art results on most datasets. The training data should be a labeled Spark DataFrame in the CoNLL 2003 IOB format, with annotation-type columns.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Setup Instructions
+st.markdown('<div class="sub-title">Setup</div>', unsafe_allow_html=True)
+st.markdown('<p>To install Spark NLP in Python, use your favorite package manager (conda, pip, etc.). For example:</p>', unsafe_allow_html=True)
+st.code("""
+pip install spark-nlp
+pip install pyspark
+""", language="bash")
+
+st.markdown("<p>Then, import Spark NLP and start a Spark session:</p>", unsafe_allow_html=True)
+st.code("""
+import sparknlp
+
+# Start Spark Session
+spark = sparknlp.start()
+""", language='python')
+
+# Example Usage with the NerDL Model
+st.markdown('<div class="sub-title">Example Usage with the NerDL Model</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>Below is an example of how to set up and use the NerDL model for named entity recognition:</p>
+</div>
+""", unsafe_allow_html=True)
+st.code('''
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+
+# Document Assembler
+document_assembler = DocumentAssembler() \\
+    .setInputCol("text") \\
+    .setOutputCol("document")
+
+# Sentence Detector
+sentence_detector = SentenceDetector() \\
+    .setInputCols(["document"]) \\
+    .setOutputCol("sentence")
+
+# Tokenizer
+tokenizer = Tokenizer() \\
+    .setInputCols(["sentence"]) \\
+    .setOutputCol("token")
+
+# Word Embeddings (glove_100d by default)
+embeddings = WordEmbeddingsModel.pretrained() \\
+    .setInputCols(["sentence", "token"]) \\
+    .setOutputCol("embeddings")
+
+# NerDL Model
+ner_tagger = NerDLModel.pretrained() \\
+    .setInputCols(["sentence", "token", "embeddings"]) \\
+    .setOutputCol("ner")
+
+# Pipeline
+pipeline = Pipeline().setStages([
+    document_assembler,
+    sentence_detector,
+    tokenizer,
+    embeddings,
+    ner_tagger
+])
+
+# Example text
+example = """
+William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist.
+He is best known as the co-founder of Microsoft Corporation. Throughout his career at Microsoft, Gates held various positions,
+including chairman, chief executive officer (CEO), president, and chief software architect. He was also the largest individual
+shareholder until May 2014.
+
+Gates is recognized as one of the foremost entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s.
+Born and raised in Seattle, Washington, he co-founded Microsoft with childhood friend Paul Allen in 1975. Initially established
+in Albuquerque, New Mexico, Microsoft grew to become the world’s largest personal computer software company.
+
+Gates led Microsoft as chairman and CEO until January 2000, when he stepped down as CEO but continued as chairman and chief
+software architect. During the late 1990s, Gates faced criticism for business practices considered anti-competitive, an opinion
+upheld by numerous court rulings.
+
+In June 2006, Gates announced his transition to a part-time role at Microsoft while dedicating full time to the Bill & Melinda Gates
+Foundation, a private charitable organization he established with his wife, Melinda Gates, in 2000. Gates gradually transferred
+his responsibilities to Ray Ozzie and Craig Mundie and stepped down as chairman of Microsoft in February 2014. He then assumed
+the role of technology adviser to support the newly appointed CEO, Satya Nadella.
+"""
+
+data = spark.createDataFrame([[example]]).toDF("text")
+
+# Transform the data
+result = pipeline.fit(data).transform(data)
+result.select("ner.result").show(truncate=False)
+''', language="python")
+
+st.text("""
++------------------------------------------------------------------------------------------------------------------------+
+|result                                                                                                                  |
++------------------------------------------------------------------------------------------------------------------------+
+|[O, B-PER, I-PER, I-PER, I-PER, O, O, O, O, O, O, O, O, O, B-MISC, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-ORG, I-ORG, O, O, O, O, O, B-ORG, O, B-PER, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-LOC, O, O, O, B-LOC, O, B-LOC, O, B-PER, O, B-ORG, O, O, O, B-PER, I-PER, O, O, O, O, B-LOC, O, B-LOC, I-LOC, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-PER, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-PER, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-PER, O, O, O, O, O, O, O, O, O, O, O, B-ORG, O, O, O, O, O, B-ORG, I-ORG, I-ORG, I-ORG, I-ORG, O, O, O, O, O, O, O, O, O, O, O, B-PER, I-PER, O, O, O, O, O, O, O, O, O, O, B-PER, I-PER, O, B-PER, I-PER, O, O, O, O, O, O, O, B-ORG, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, B-PER, I-PER, O]|
++------------------------------------------------------------------------------------------------------------------------+
+""")
+
+# Using the EntityRuler Annotator
+st.markdown('<div class="sub-title">Using the EntityRuler Annotator</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>In addition to the deep learning-based approach, Spark NLP also supports a rule-based method for NER using the EntityRuler annotator. This method uses a gazetteer or regular expressions to identify entities in the text.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Example Usage with EntityRuler
+st.markdown('<div class="sub-title">Example Usage with EntityRuler</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>For NER tasks based on a gazetteer list, we will use the EntityRuler annotator, which comes in both Approach and Model versions.</p>
+    <p>Since this annotator finds entities based on a list of desired names, the EntityRulerApproach annotator stores the given list in the EntityRulerModel parameters. All we need is a JSON or CSV file with the list of names or regex rules. For example, we may use the following entities.json file:</p>
+</div>
+""", unsafe_allow_html=True)
+st.code("""
+[
+    {
+        "label": "PERSON",
+        "patterns": [
+            "John",
+            "John Snow"
+        ]
+    },
+    {
+        "label": "PERSON",
+        "patterns": [
+            "Eddard",
+            "Eddard Stark"
+        ]
+    },
+    {
+        "label": "LOCATION",
+        "patterns": [
+            "Winterfell"
+        ]
+    },
+    {
+        "label": "DATE",
+        "patterns": [
+            "[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}"
+        ],
+        "regex": true
+    }
+]
+""", language="json")
+
+# Pipeline Setup
+st.markdown('<div class="sub-title">Pipeline Setup</div>', unsafe_allow_html=True)
+st.code("""
+from sparknlp.base import DocumentAssembler
+from sparknlp.annotator import EntityRulerApproach, Tokenizer
+from pyspark.ml import Pipeline
+
+document_assembler = DocumentAssembler() \\
+    .setInputCol("text") \\
+    .setOutputCol("document")
+
+tokenizer = Tokenizer() \\
+    .setInputCols(["document"]) \\
+    .setOutputCol("token")
+
+entity_ruler = EntityRulerApproach() \\
+    .setInputCols(["document", "token"]) \\
+    .setOutputCol("entity") \\
+    .setPatternsResource("entities.json")
+
+pipeline = Pipeline(stages=[document_assembler, tokenizer, entity_ruler])
+""", language="python")
+
+# Example Sentences
+st.markdown('<div class="sub-title">Example Sentences</div>', unsafe_allow_html=True)
+st.code('''
+example = """Game of Thrones was released on 2011-04-17.
+Lord Eddard Stark was the head of House Stark.
+John Snow lives in Winterfell."""
+
+data = spark.createDataFrame([[example]]).toDF("text")
+pipeline_model = pipeline.fit(data)
+''', language="python")
+
+# Save and Load the Model
+st.markdown('<div class="sub-title">Saving and Loading the Model for Future Use (optional)</div>', unsafe_allow_html=True)
+st.code("""
+from sparknlp.annotator import EntityRulerModel
+
+pipeline_model.stages[-1].write().overwrite().save('my_entityruler')
+
+entity_ruler = EntityRulerModel.load("my_entityruler") \\
+    .setInputCols(["document", "token"]) \\
+    .setOutputCol("entity")
+""", language="python")
+
+# Result Visualization
+st.markdown('<div class="sub-title">Result Visualization</div>', unsafe_allow_html=True)
+st.code("""
+import pyspark.sql.functions as F
+
+result = pipeline_model.transform(data)
+
+result.select(
+    F.explode(F.col("entity")).alias("entity")
+).select(
+    F.col("entity.result").alias("keyword"),
+    F.col("entity.metadata").alias("metadata")
+).select(
+    F.col("keyword"),
+    F.expr("metadata['entity']").alias("label")
+).show()
+""", language="python")
+
+st.text("""
++------------+--------+
+|     keyword|   label|
++------------+--------+
+|  2011-04-17|    DATE|
+|Eddard Stark|  PERSON|
+|   John Snow|  PERSON|
+|  Winterfell|LOCATION|
++------------+--------+
+""")
+
+# Non-English Languages
+st.markdown('<div class="sub-title">Non-English Languages</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<p>The EntityRuler annotator uses the Aho-Corasick algorithm, which may not handle languages with unique characters or alphabets effectively. For example:</p>
+<div class="section">
+    <ul>
+        <li>Spanish includes the ñ character.</li>
+        <li>Portuguese uses ç.</li>
+        <li>Many languages have accented characters (á, ú, ê, etc.).</li>
+    </ul>
+</div>
+<p>To accommodate these characters, use the <code>.setAlphabetResource</code> parameter.</p>
+<p>When a character is missing from the alphabet, you might encounter errors like this:</p>
+<pre><code>Py4JJavaError: An error occurred while calling o69.fit.
+: java.lang.UnsupportedOperationException: Char ú not found on alphabet. Please check alphabet</code></pre>
+<p>To define a custom alphabet, create a text file (e.g., <code>custom_alphabet.txt</code>) with all required characters:</p>
+""", unsafe_allow_html=True)
+st.code("""
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+áúéêçñ
+ÁÚÉÊÇÑ
+""")
+
+st.markdown("""
+<p>Alternatively, you can use predefined alphabets for common languages. For instance, for Spanish:</p>
+""", unsafe_allow_html=True)
+st.code("""
+entity_ruler = (
+    EntityRulerApproach()
+    .setInputCols(["sentence"])
+    .setOutputCol("entity")
+    .setPatternsResource("locations.json")
+    .setAlphabetResource("Spanish")
+)
+""")
+
+# Summary
+st.markdown('<div class="sub-title">Summary</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>In this article, we covered named entity recognition using both deep learning-based and rule-based methods. We showed how to perform the task with the open-source Spark NLP library in Python, which can be used at scale in the Spark ecosystem. These methods can be applied to natural language processing tasks in various fields, including finance and healthcare.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# References
+st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://nlp.johnsnowlabs.com/docs/en/annotators#entityruler" target="_blank" rel="noopener">EntityRuler</a> annotator documentation</li>
+        <li>Python Docs: <a class="link" href="https://nlp.johnsnowlabs.com/api/python/reference/autosummary/sparknlp/annotator/er/entity_ruler/index.html#sparknlp.annotator.er.entity_ruler.EntityRulerApproach" target="_blank" rel="noopener">EntityRulerApproach</a>, <a class="link" href="https://nlp.johnsnowlabs.com/api/python/reference/autosummary/sparknlp/annotator/er/entity_ruler/index.html#sparknlp.annotator.er.entity_ruler.EntityRulerModel" target="_blank" rel="noopener">EntityRulerModel</a></li>
+        <li>Scala Docs: <a class="link" href="https://nlp.johnsnowlabs.com/api/com/johnsnowlabs/nlp/annotators/er/EntityRulerApproach.html" target="_blank" rel="noopener">EntityRulerApproach</a>, <a class="link" href="https://nlp.johnsnowlabs.com/api/com/johnsnowlabs/nlp/annotators/er/EntityRulerModel.html" target="_blank" rel="noopener">EntityRulerModel</a></li>
+        <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
+        <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/" target="_blank" rel="noopener">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
+
+st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
+        <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
+        <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
+        <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
+        <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
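The page above notes that NerDL trains on a labeled DataFrame in CoNLL 2003 IOB format, but the commit only demonstrates inference with pretrained models. For completeness, a minimal training sketch, assuming a local CoNLL 2003 file (the eng.train path and the hyperparameters here are illustrative, not part of the commit):

import sparknlp
from sparknlp.training import CoNLL
from sparknlp.annotator import WordEmbeddingsModel, NerDLApproach
from pyspark.ml import Pipeline

spark = sparknlp.start()

# The CoNLL reader yields document/sentence/token/pos/label columns ready for training
training_data = CoNLL().readDataset(spark, "eng.train")

# The same glove_100d embeddings the pretrained ner_dl model expects
embeddings = WordEmbeddingsModel.pretrained("glove_100d") \
    .setInputCols(["sentence", "token"]) \
    .setOutputCol("embeddings")

# NerDLApproach is the trainable counterpart of NerDLModel (Char CNNs - BiLSTM - CRF)
ner_approach = NerDLApproach() \
    .setInputCols(["sentence", "token", "embeddings"]) \
    .setLabelColumn("label") \
    .setOutputCol("ner") \
    .setMaxEpochs(1) \
    .setBatchSize(8)

ner_model = Pipeline(stages=[embeddings, ner_approach]).fit(training_data)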
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+streamlit
+st-annotated-text
+pandas
+numpy
+spark-nlp
+pyspark
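None of the requirements are version-pinned, so pip resolves the latest spark-nlp and pyspark, which must be mutually compatible. A quick post-install sanity check (a sketch; the commit itself pins nothing):

import sparknlp

# Starting a session confirms the pyspark / spark-nlp pairing works end to end
spark = sparknlp.start()
print("Spark NLP:", sparknlp.version())
print("Apache Spark:", spark.version)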