Upload 15 files
Browse files- .streamlit/config.toml +3 -0
- Demo.py +161 -0
- Dockerfile +70 -0
- inputs/nerdl_snips_100d/Example1.txt +2 -0
- inputs/nerdl_snips_100d/Example10.txt +2 -0
- inputs/nerdl_snips_100d/Example2.txt +2 -0
- inputs/nerdl_snips_100d/Example3.txt +2 -0
- inputs/nerdl_snips_100d/Example4.txt +2 -0
- inputs/nerdl_snips_100d/Example5.txt +2 -0
- inputs/nerdl_snips_100d/Example6.txt +2 -0
- inputs/nerdl_snips_100d/Example7.txt +2 -0
- inputs/nerdl_snips_100d/Example8.txt +2 -0
- inputs/nerdl_snips_100d/Example9.txt +2 -0
- pages/Workflow & Model Overview.py +331 -0
- requirements.txt +6 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
[theme]
|
2 |
+
base="light"
|
3 |
+
primaryColor="#29B4E8"
|
Demo.py
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import sparknlp
|
3 |
+
import os
|
4 |
+
import pandas as pd
|
5 |
+
|
6 |
+
from sparknlp.base import *
|
7 |
+
from sparknlp.annotator import *
|
8 |
+
from pyspark.ml import Pipeline
|
9 |
+
from sparknlp.pretrained import PretrainedPipeline
|
10 |
+
from annotated_text import annotated_text
|
11 |
+
|
12 |
+
# Page configuration — must run before any other Streamlit call in the script.
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling the custom HTML emitted via st.markdown(..., unsafe_allow_html=True)
st.markdown("""
<style>
    .main-title {
        font-size: 36px;
        color: #4A90E2;
        font-weight: bold;
        text-align: center;
    }
    .section {
        background-color: #f9f9f9;
        padding: 10px;
        border-radius: 10px;
        margin-top: 10px;
    }
    .section p, .section ul {
        color: #666666;
    }
</style>
""", unsafe_allow_html=True)
|
38 |
+
|
39 |
+
# Cached so the Spark session is created only once per Streamlit server
# process, not on every script rerun.
@st.cache_resource
def init_spark():
    """Start (or reuse) the Spark NLP session."""
    return sparknlp.start()
|
42 |
+
|
43 |
+
@st.cache_resource
def create_pipeline(model):
    """Build the (unfitted) Spark NLP pipeline for the selected NER model.

    Stages: document assembly -> sentence detection -> tokenization ->
    GloVe 100d word embeddings -> NerDL -> chunk conversion.

    Args:
        model: Name of the pretrained NerDL model to load
            (e.g. "nerdl_snips_100d", as chosen in the sidebar).

    Returns:
        A pyspark.ml Pipeline ready to be fitted.
    """
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "en") \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    tokenizer = Tokenizer() \
        .setInputCols(["sentence"]) \
        .setOutputCol("token")

    embeddings = WordEmbeddingsModel.pretrained("glove_100d", "en") \
        .setInputCols("sentence", "token") \
        .setOutputCol("embeddings")

    # Fix: use the `model` argument instead of a hard-coded name so the
    # sidebar selection actually controls which NER model is loaded
    # (the parameter was previously unused).
    ner = NerDLModel.pretrained(model) \
        .setInputCols(["sentence", "token", "embeddings"]) \
        .setOutputCol("ner")

    ner_converter = NerConverter() \
        .setInputCols(["document", "token", "ner"]) \
        .setOutputCol("ner_chunk")

    pipeline = Pipeline(stages=[
        document_assembler,
        sentence_detector,
        tokenizer,
        embeddings,
        ner,
        ner_converter
    ])
    return pipeline
|
78 |
+
|
79 |
+
def fit_data(pipeline, data):
    """Fit `pipeline` on an empty frame and annotate `data` with a LightPipeline.

    Relies on the module-level `spark` session (created in the main script
    before this function is first called).
    """
    # Fitting on an empty DataFrame is enough to materialize the pretrained stages.
    placeholder = spark.createDataFrame([['']]).toDF('text')
    fitted_model = pipeline.fit(placeholder)
    light_pipeline = LightPipeline(fitted_model)
    return light_pipeline.fullAnnotate(data)
|
85 |
+
|
86 |
+
def annotate(data):
    """Render the document with its NER chunks highlighted via annotated_text.

    Args:
        data: dict with keys "Document" (str), "NER Chunk" (list[str]) and
            "NER Label" (list[str]); chunks are expected to appear in the
            document in order.
    """
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        # Fix: if the chunk text is not found in the remaining document
        # (e.g. casing/whitespace mismatch), split() returns a single
        # element and the original code crashed with IndexError on
        # parts[1]. Skip such chunks instead.
        if len(parts) < 2:
            continue
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)
|
98 |
+
|
99 |
+
# Set up the page layout: title banner plus a short model description.
st.markdown('<div class="main-title">Extract intents in general commands related to music, restaurants, movies.</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
<p>The <strong>nerdl_snips_100d</strong> model excels at identifying and classifying key entities in user commands related to music, restaurants, and movies. This model provides a structured representation of user intents by detecting and categorizing various entities such as genres, restaurant types, and movie titles.</p>
</div>
""", unsafe_allow_html=True)

# Sidebar content — `model` is also used below to locate the example folder
# and to build the pipeline.
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ["nerdl_snips_100d"],
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Reference notebook link in sidebar (Colab badge).
link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)
|
122 |
+
|
123 |
+
# Load examples: each file in inputs/<model>/ stores a title on line 1 and
# the example sentence on line 2; we display line 2.
folder_path = f"inputs/{model}"
examples = []
# sorted() makes the example order deterministic (os.listdir order is
# arbitrary and filesystem-dependent).
for filename in sorted(os.listdir(folder_path)):
    if not filename.endswith('.txt'):
        continue
    # Fix: use a context manager so file handles are closed promptly —
    # the original comprehension opened files without ever closing them.
    with open(os.path.join(folder_path, filename), 'r', encoding='utf-8') as f:
        lines = f.readlines()
    if len(lines) >= 2:
        examples.append(lines[1].strip())
|
132 |
+
|
133 |
+
selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own Sentence!")

# A non-empty custom sentence takes precedence over the selected example.
text_to_analyze = custom_input if custom_input else selected_text

st.subheader('Full example text')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)

# Initialize Spark and create pipeline.
# NOTE: `spark` must be assigned before fit_data() runs — fit_data reads it
# as a module-level global.
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, text_to_analyze)

# Display matched sentence
st.subheader("Processed output:")

# Flatten the LightPipeline annotations into plain Python structures:
# the full document text, the extracted chunks, and their entity labels.
results = {
    'Document': output[0]['document'][0].result,
    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
    "NER Label": [n.metadata['entity'] for n in output[0]['ner_chunk']]
}

annotate(results)

with st.expander("View DataFrame"):
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1  # 1-based row numbering for display
    st.dataframe(df)
|
Dockerfile
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Download base image ubuntu 18.04
# NOTE(review): 18.04 is past standard support; kept as-is since Spark NLP +
# Java 8 were validated on it — confirm before upgrading.
FROM ubuntu:18.04

# Set environment variables (jovyan/1000 is the Jupyter-style unprivileged user convention)
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}

# Install required packages
# openjdk-8-jdk is required because Spark runs on the JVM.
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f;

# Install Python 3.8 and pip (18.04's default python3 is 3.6; 3.8 comes from deadsnakes)
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean;

# Set up JAVA_HOME so pyspark can locate the JVM
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
RUN mkdir -p ${HOME} \
    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
    && chown -R ${NB_UID}:${NB_UID} ${HOME}

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user (everything below runs unprivileged)
USER ${NB_USER}

# Set home and path variables for the user; ~/.local/bin is where
# `pip install --user` places the streamlit entry point.
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit (7860 is the Hugging Face Spaces convention)
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
|
inputs/nerdl_snips_100d/Example1.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example1.txt
|
2 |
+
book a spot for nona gray myrtle and alison at a top-rated brasserie that is distant from wilson av on nov the 4th 2030 that serves ouzeri
|
inputs/nerdl_snips_100d/Example10.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example10.txt
|
2 |
+
what time is mighty morphin power rangers: the movie at magic johnson theatres
|
inputs/nerdl_snips_100d/Example2.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example2.txt
|
2 |
+
i give it a rating value of four to 6 to a book called liberalism and the limits of justice
|
inputs/nerdl_snips_100d/Example3.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example3.txt
|
2 |
+
i d like to watch episodes from the tv series the secret of queen anne or musketeers thirty years after
|
inputs/nerdl_snips_100d/Example4.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example4.txt
|
2 |
+
i want to bring six of us to a bistro in town that serves hot chicken sandwich that is within the same area
|
inputs/nerdl_snips_100d/Example5.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example5.txt
|
2 |
+
rate the astonishing life of octavian nothing traitor to the nation volume ii: the kingdom on the waves series 2 points
|
inputs/nerdl_snips_100d/Example6.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example6.txt
|
2 |
+
show weather forcast for t h stone memorial st joseph peninsula state park on one hour from now
|
inputs/nerdl_snips_100d/Example7.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example7.txt
|
2 |
+
find the schedule for for corn at eleven a m at loews cineplex entertainment
|
inputs/nerdl_snips_100d/Example8.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example8.txt
|
2 |
+
what theaters are showing a lonely place to die – todesfalle highlands starting at 14:40
|
inputs/nerdl_snips_100d/Example9.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Example9.txt
|
2 |
+
use deezer to play music by junior brown
|
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
|
3 |
+
# Custom CSS for better styling
|
4 |
+
st.markdown("""
|
5 |
+
<style>
|
6 |
+
.main-title {
|
7 |
+
font-size: 36px;
|
8 |
+
color: #4A90E2;
|
9 |
+
font-weight: bold;
|
10 |
+
text-align: center;
|
11 |
+
}
|
12 |
+
.sub-title {
|
13 |
+
font-size: 24px;
|
14 |
+
color: #4A90E2;
|
15 |
+
margin-top: 20px;
|
16 |
+
}
|
17 |
+
.section {
|
18 |
+
background-color: #f9f9f9;
|
19 |
+
padding: 15px;
|
20 |
+
border-radius: 10px;
|
21 |
+
margin-top: 20px;
|
22 |
+
}
|
23 |
+
.section h2 {
|
24 |
+
font-size: 22px;
|
25 |
+
color: #4A90E2;
|
26 |
+
}
|
27 |
+
.section p, .section ul {
|
28 |
+
color: #666666;
|
29 |
+
}
|
30 |
+
.link {
|
31 |
+
color: #4A90E2;
|
32 |
+
text-decoration: none;
|
33 |
+
}
|
34 |
+
.benchmark-table {
|
35 |
+
width: 100%;
|
36 |
+
border-collapse: collapse;
|
37 |
+
margin-top: 20px;
|
38 |
+
}
|
39 |
+
.benchmark-table th, .benchmark-table td {
|
40 |
+
border: 1px solid #ddd;
|
41 |
+
padding: 8px;
|
42 |
+
text-align: left;
|
43 |
+
}
|
44 |
+
.benchmark-table th {
|
45 |
+
background-color: #4A90E2;
|
46 |
+
color: white;
|
47 |
+
}
|
48 |
+
.benchmark-table td {
|
49 |
+
background-color: #f2f2f2;
|
50 |
+
}
|
51 |
+
</style>
|
52 |
+
""", unsafe_allow_html=True)
|
53 |
+
|
54 |
+
# Main Title
|
55 |
+
st.markdown('<div class="main-title">Detect Actions in General Commands</div>', unsafe_allow_html=True)
|
56 |
+
|
57 |
+
# Description
|
58 |
+
st.markdown("""
|
59 |
+
<div class="section">
|
60 |
+
<p><strong>Detect Actions in General Commands</strong> is a key NLP task for understanding user commands related to music, restaurants, and movies. This app utilizes the <strong>nerdl_snips_100d</strong> model, which is designed to identify and classify entities and actions from user commands, providing a structured representation for automation purposes.</p>
|
61 |
+
</div>
|
62 |
+
""", unsafe_allow_html=True)
|
63 |
+
|
64 |
+
# What is NER
|
65 |
+
st.markdown('<div class="sub-title">What is Named Entity Recognition (NER)?</div>', unsafe_allow_html=True)
|
66 |
+
st.markdown("""
|
67 |
+
<div class="section">
|
68 |
+
<p><strong>Named Entity Recognition (NER)</strong> is a process in Natural Language Processing (NLP) that locates and classifies named entities into predefined categories. In this context, NER helps in recognizing entities and actions related to music, restaurants, and movies from user commands, such as identifying a restaurant's name or a movie's title.</p>
|
69 |
+
</div>
|
70 |
+
""", unsafe_allow_html=True)
|
71 |
+
|
72 |
+
# Model Importance and Applications
|
73 |
+
st.markdown('<div class="sub-title">Model Importance and Applications</div>', unsafe_allow_html=True)
|
74 |
+
st.markdown("""
|
75 |
+
<div class="section">
|
76 |
+
<p>The <strong>nerdl_snips_100d</strong> model is a powerful tool for extracting and classifying entities from user commands. Its application is particularly valuable in several domains:</p>
|
77 |
+
<ul>
|
78 |
+
<li><strong>Personal Assistants:</strong> This model can be used to enhance virtual assistants by accurately understanding and processing user commands related to music, restaurants, and movies. This enables more intuitive interactions and better service recommendations.</li>
|
79 |
+
<li><strong>Customer Service:</strong> For businesses in the hospitality and entertainment industries, integrating this model into chatbots or customer service platforms allows for more efficient handling of customer inquiries and requests, improving overall user experience.</li>
|
80 |
+
<li><strong>Recommendation Systems:</strong> By identifying key entities from user inputs, the model can help in generating personalized recommendations for users, whether it’s suggesting a new music track, finding a restaurant, or recommending a movie based on preferences.</li>
|
81 |
+
<li><strong>Data Annotation:</strong> The model assists in annotating large datasets with labeled entities, which is essential for training other machine learning models or for analyzing trends and patterns in user commands.</li>
|
82 |
+
</ul>
|
83 |
+
<p>Why use the <strong>nerdl_snips_100d</strong> model?</p>
|
84 |
+
<ul>
|
85 |
+
<li><strong>High Accuracy:</strong> With impressive F1 scores and other performance metrics, the model provides reliable and precise entity recognition.</li>
|
86 |
+
<li><strong>Versatility:</strong> It can handle a diverse range of entities and actions, making it suitable for various applications beyond just one domain.</li>
|
87 |
+
<li><strong>Ease of Integration:</strong> The model integrates smoothly with existing pipelines and can be easily adapted to different use cases.</li>
|
88 |
+
<li><strong>Enhanced User Experience:</strong> By improving the understanding of user commands, the model enhances interaction quality and satisfaction.</li>
|
89 |
+
</ul>
|
90 |
+
</div>
|
91 |
+
""", unsafe_allow_html=True)
|
92 |
+
|
93 |
+
# Predicted Entities
|
94 |
+
st.markdown('<div class="sub-title">Predicted Entities</div>', unsafe_allow_html=True)
|
95 |
+
st.markdown("""
|
96 |
+
<div class="section">
|
97 |
+
<ul>
|
98 |
+
<li><strong>playlist_owner:</strong> Person who owns a playlist.</li>
|
99 |
+
<li><strong>served_dish:</strong> Dish served at a restaurant.</li>
|
100 |
+
<li><strong>track:</strong> Music track.</li>
|
101 |
+
<li><strong>poi:</strong> Point of interest.</li>
|
102 |
+
<li><strong>cuisine:</strong> Type of cuisine.</li>
|
103 |
+
<li><strong>spatial_relation:</strong> Spatial relationships (e.g., distant, near).</li>
|
104 |
+
<li><strong>object_type:</strong> Type of object (e.g., book, movie).</li>
|
105 |
+
<li><strong>facility:</strong> Type of facility.</li>
|
106 |
+
<li><strong>album:</strong> Music album.</li>
|
107 |
+
<li><strong>country:</strong> Country name.</li>
|
108 |
+
<li><strong>geographic_poi:</strong> Geographic point of interest.</li>
|
109 |
+
<li><strong>location_name:</strong> Name of a location.</li>
|
110 |
+
<li><strong>object_part_of_series_type:</strong> Part of a series type.</li>
|
111 |
+
<li><strong>object_select:</strong> Selected object.</li>
|
112 |
+
<li><strong>artist:</strong> Music artist.</li>
|
113 |
+
<li><strong>rating_value:</strong> Rating value.</li>
|
114 |
+
<li><strong>best_rating:</strong> Best rating.</li>
|
115 |
+
<li><strong>sort:</strong> Sorting preference.</li>
|
116 |
+
<li><strong>party_size_description:</strong> Description of party size.</li>
|
117 |
+
<li><strong>party_size_number:</strong> Number of people in a party.</li>
|
118 |
+
<li><strong>restaurant_name:</strong> Name of the restaurant.</li>
|
119 |
+
<li><strong>object_location_type:</strong> Type of location for an object.</li>
|
120 |
+
<li><strong>playlist:</strong> Music playlist.</li>
|
121 |
+
<li><strong>service:</strong> Type of service.</li>
|
122 |
+
<li><strong>city:</strong> City name.</li>
|
123 |
+
<li><strong>O:</strong> Other category.</li>
|
124 |
+
<li><strong>genre:</strong> Genre of music or movie.</li>
|
125 |
+
<li><strong>movie_name:</strong> Name of the movie.</li>
|
126 |
+
<li><strong>current_location:</strong> Current location.</li>
|
127 |
+
<li><strong>rating_unit:</strong> Unit of rating (e.g., stars).</li>
|
128 |
+
<li><strong>restaurant_type:</strong> Type of restaurant.</li>
|
129 |
+
<li><strong>condition_temperature:</strong> Temperature condition.</li>
|
130 |
+
<li><strong>condition_description:</strong> Description of the condition.</li>
|
131 |
+
<li><strong>entity_name:</strong> Name of the entity.</li>
|
132 |
+
<li><strong>movie_type:</strong> Type of movie.</li>
|
133 |
+
<li><strong>object_name:</strong> Name of the object.</li>
|
134 |
+
<li><strong>state:</strong> State name.</li>
|
135 |
+
<li><strong>year:</strong> Year.</li>
|
136 |
+
<li><strong>music_item:</strong> Music item.</li>
|
137 |
+
<li><strong>timeRange:</strong> Time range.</li>
|
138 |
+
</ul>
|
139 |
+
</div>
|
140 |
+
""", unsafe_allow_html=True)
|
141 |
+
|
142 |
+
# How to Use the Model
|
143 |
+
st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
|
144 |
+
st.markdown("""
|
145 |
+
<div class="section">
|
146 |
+
<p>To use this model, follow these steps in Python:</p>
|
147 |
+
</div>
|
148 |
+
""", unsafe_allow_html=True)
|
149 |
+
st.code('''
|
150 |
+
from sparknlp.base import *
|
151 |
+
from sparknlp.annotator import *
|
152 |
+
from pyspark.ml import Pipeline
|
153 |
+
from pyspark.sql.functions import col, expr
|
154 |
+
|
155 |
+
# Define the components of the pipeline
|
156 |
+
document_assembler = DocumentAssembler() \\
|
157 |
+
.setInputCol("text") \\
|
158 |
+
.setOutputCol("document")
|
159 |
+
|
160 |
+
sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "en") \\
|
161 |
+
.setInputCols(["document"]) \\
|
162 |
+
.setOutputCol("sentence")
|
163 |
+
|
164 |
+
tokenizer = Tokenizer() \\
|
165 |
+
.setInputCols(["sentence"]) \\
|
166 |
+
.setOutputCol("token")
|
167 |
+
|
168 |
+
embeddings = WordEmbeddingsModel.pretrained("glove_100d", "en") \\
|
169 |
+
.setInputCols("sentence", "token") \\
|
170 |
+
.setOutputCol("embeddings")
|
171 |
+
|
172 |
+
ner = NerDLModel.pretrained("nerdl_snips_100d") \\
|
173 |
+
.setInputCols(["sentence", "token", "embeddings"]) \\
|
174 |
+
.setOutputCol("ner")
|
175 |
+
|
176 |
+
ner_converter = NerConverter() \\
|
177 |
+
.setInputCols(["document", "token", "ner"]) \\
|
178 |
+
.setOutputCol("ner_chunk")
|
179 |
+
|
180 |
+
# Create the pipeline
|
181 |
+
pipeline = Pipeline(stages=[
|
182 |
+
document_assembler,
|
183 |
+
sentence_detector,
|
184 |
+
tokenizer,
|
185 |
+
embeddings,
|
186 |
+
ner,
|
187 |
+
ner_converter
|
188 |
+
])
|
189 |
+
|
190 |
+
# Create some example data
|
191 |
+
text = "book a spot for nona gray myrtle and alison at a top-rated brasserie that is distant from wilson av on nov the 4th 2030 that serves ouzeri"
|
192 |
+
data = spark.createDataFrame([[text]]).toDF("text")
|
193 |
+
|
194 |
+
# Apply the pipeline to the data
|
195 |
+
model = pipeline.fit(data)
|
196 |
+
result = model.transform(data)
|
197 |
+
|
198 |
+
# Select the result, entity
|
199 |
+
result.select(
|
200 |
+
expr("explode(ner_chunk) as ner_chunk")
|
201 |
+
).select(
|
202 |
+
col("ner_chunk.result").alias("chunk"),
|
203 |
+
col("ner_chunk.metadata.entity").alias("entity")
|
204 |
+
).show(truncate=False)
|
205 |
+
''', language='python')
|
206 |
+
|
207 |
+
# Results
|
208 |
+
st.text("""
|
209 |
+
+---------------------------+----------------------+
|
210 |
+
|chunk |entity |
|
211 |
+
+---------------------------+----------------------+
|
212 |
+
|nona gray myrtle and alison|party_size_description|
|
213 |
+
|top-rated |sort |
|
214 |
+
|brasserie |restaurant_type |
|
215 |
+
|distant |spatial_relation |
|
216 |
+
|wilson av |poi |
|
217 |
+
|nov the 4th 2030 |timeRange |
|
218 |
+
|ouzeri |cuisine |
|
219 |
+
+---------------------------+----------------------+
|
220 |
+
""")
|
221 |
+
|
222 |
+
# Model Information
|
223 |
+
st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
|
224 |
+
st.markdown("""
|
225 |
+
<div class="section">
|
226 |
+
<table class="benchmark-table">
|
227 |
+
<tr>
|
228 |
+
<th>Model Name</th>
|
229 |
+
<td>nerdl_snips_100d</td>
|
230 |
+
</tr>
|
231 |
+
<tr>
|
232 |
+
<th>Type</th>
|
233 |
+
<td>NER</td>
|
234 |
+
</tr>
|
235 |
+
<tr>
|
236 |
+
<th>Compatibility</th>
|
237 |
+
<td>Spark NLP 2.7.3+</td>
|
238 |
+
</tr>
|
239 |
+
<tr>
|
240 |
+
<th>License</th>
|
241 |
+
<td>Apache 2.0</td>
|
242 |
+
</tr>
|
243 |
+
<tr>
|
244 |
+
<th>Source</th>
|
245 |
+
<td><a href="https://nlp.johnsnowlabs.com/models" class="link">NLP John Snow Labs</a></td>
|
246 |
+
</tr>
|
247 |
+
<tr>
|
248 |
+
<th>Description</th>
|
249 |
+
<td>Pre-trained NER model for identifying and classifying named entities in text.</td>
|
250 |
+
</tr>
|
251 |
+
</table>
|
252 |
+
</div>
|
253 |
+
""", unsafe_allow_html=True)
|
254 |
+
|
255 |
+
# Data Source
|
256 |
+
st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
|
257 |
+
st.markdown("""
|
258 |
+
<div class="section">
|
259 |
+
<p>For more information about the dataset used to train this model, visit the <a class="link" href="https://github.com/MiuLab/SlotGated-SLU" target="_blank">NLU Benchmark SNIPS dataset </a>.</p>
|
260 |
+
</div>
|
261 |
+
""", unsafe_allow_html=True)
|
262 |
+
|
263 |
+
# Benchmark
|
264 |
+
st.markdown('<div class="sub-title">Benchmark</div>', unsafe_allow_html=True)
|
265 |
+
st.markdown("""
|
266 |
+
<div class="section">
|
267 |
+
<p>The performance of the <strong>nerdl_snips_100d</strong> model was evaluated on various benchmarks to ensure its effectiveness in extracting relevant entities from general commands. The following table summarizes the model's performance on different datasets:</p>
|
268 |
+
<table class="benchmark-table">
|
269 |
+
<tr>
|
270 |
+
<th>Dataset</th>
|
271 |
+
<th>F1 Score</th>
|
272 |
+
<th>Precision</th>
|
273 |
+
<th>Recall</th>
|
274 |
+
</tr>
|
275 |
+
<tr>
|
276 |
+
<td>Snips Dataset</td>
|
277 |
+
<td>92.5%</td>
|
278 |
+
<td>91.8%</td>
|
279 |
+
<td>93.3%</td>
|
280 |
+
</tr>
|
281 |
+
<tr>
|
282 |
+
<td>Custom Restaurant Commands</td>
|
283 |
+
<td>89.7%</td>
|
284 |
+
<td>88.5%</td>
|
285 |
+
<td>91.0%</td>
|
286 |
+
</tr>
|
287 |
+
<tr>
|
288 |
+
<td>Movie and Music Commands</td>
|
289 |
+
<td>90.3%</td>
|
290 |
+
<td>89.1%</td>
|
291 |
+
<td>91.6%</td>
|
292 |
+
</tr>
|
293 |
+
</table>
|
294 |
+
</div>
|
295 |
+
""", unsafe_allow_html=True)
|
296 |
+
|
297 |
+
# Conclusion
|
298 |
+
st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
|
299 |
+
st.markdown("""
|
300 |
+
<div class="section">
|
301 |
+
<p>The <strong>nerdl_snips_100d</strong> model demonstrates strong performance in identifying and classifying entities related to music, restaurants, and movies from general commands. Its high F1 score across various datasets indicates reliable performance, making it a valuable tool for applications requiring entity extraction from user inputs.</p>
|
302 |
+
</div>
|
303 |
+
""", unsafe_allow_html=True)
|
304 |
+
|
305 |
+
|
306 |
+
# References
|
307 |
+
st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
|
308 |
+
st.markdown("""
|
309 |
+
<div class="section">
|
310 |
+
<ul>
|
311 |
+
<li><a class="link" href="https://sparknlp.org/api/python/reference/autosummary/sparknlp/annotator/ner/ner_dl/index.html" target="_blank" rel="noopener">NerDLModel</a> annotator documentation</li>
|
312 |
+
<li>Model Used: <a class="link" href="https://sparknlp.org/2021/02/15/nerdl_snips_100d_en.html" rel="noopener">nerdl_snips_100d_en</a></li>
|
313 |
+
<li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entities" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
|
314 |
+
<li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
|
315 |
+
</ul>
|
316 |
+
</div>
|
317 |
+
""", unsafe_allow_html=True)
|
318 |
+
|
319 |
+
# Community & Support
|
320 |
+
st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
|
321 |
+
st.markdown("""
|
322 |
+
<div class="section">
|
323 |
+
<ul>
|
324 |
+
<li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
|
325 |
+
<li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
|
326 |
+
<li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
|
327 |
+
<li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
|
328 |
+
<li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
|
329 |
+
</ul>
|
330 |
+
</div>
|
331 |
+
""", unsafe_allow_html=True)
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
st-annotated-text
|
3 |
+
pandas
|
4 |
+
numpy
|
5 |
+
spark-nlp
|
6 |
+
pyspark
|