Upload 5 files

- .streamlit/config.toml +3 -0
- Demo.py +158 -0
- Dockerfile +70 -0
- pages/Workflow & Model Overview.py +220 -0
- requirements.txt +6 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@

[theme]
base="light"
primaryColor="#29B4E8"
Demo.py
ADDED
@@ -0,0 +1,158 @@

import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text

# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model):
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    sentence_detector = SentenceDetector() \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    word_segmenter = WordSegmenterModel.pretrained("wordseg_kaist_ud", "ko") \
        .setInputCols(["sentence"]) \
        .setOutputCol("token")

    embeddings = WordEmbeddingsModel.pretrained("glove_840B_300", "xx") \
        .setInputCols(["document", "token"]) \
        .setOutputCol("embeddings")

    ner = NerDLModel.pretrained("ner_kmou_glove_840B_300d", "ko") \
        .setInputCols(["document", "token", "embeddings"]) \
        .setOutputCol("ner")

    ner_converter = NerConverter().setInputCols(["document", "token", "ner"]).setOutputCol("ner_chunk")

    pipeline = Pipeline(stages=[document_assembler, sentence_detector, word_segmenter, embeddings, ner, ner_converter])
    return pipeline

def fit_data(pipeline, data):
    # Pretrained stages need no training data, so fit on an empty frame
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    result = model.fullAnnotate(data)
    return result

def annotate(data):
    # Interleave plain text and (chunk, label) tuples for annotated_text()
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)  # text before the chunk, text after it
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)

# Set up the page layout
st.markdown('<div class="main-title">Recognize entities in Korean text</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>This model uses the pre-trained <code>glove_840B_300</code> embeddings model from the WordEmbeddings annotator as an input.</p>
</div>
""", unsafe_allow_html=True)

# Sidebar content
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ["ner_kmou_glove_840B_300d"],
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Reference notebook link in sidebar
link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/public/NER_KO.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Load examples
examples = [
    """ARD , ZDF 등 공영 TV 와 바이에른주 방송 , 북부 독일 방송 등 은 이 날 한국 의 총선 소식 과 관련 , 여당 의 과반수 의석 확보 와 신당 의 득표 율 이 이번 선거 의 최대 관심사 이 라고 보도 하 ㄴ 데 잇 어 저녁 시간 부터 는 수 차례 에 걸치 어 개표 상황 과 정당 별 의석 전망 을 속보 로 전하 았 다 .""",
    """두 나라 관계 는 중국 의 인권 문제 와 핵확산 방지 문제 , 통상 문제 및 최근 의 F 16 전투기 대 대만 판매 등 을 놓 고 이미 위험선 상 에 오 아 있 는데 클린턴 행정부 의 등장 으로 양국 관계 가 더욱 경색 되 ㄹ 것 을 걱정 하 는 분위기 .""",
    """서울대 건축공학 과 를 졸업 하 ㄴ 이 씨 는 한국건축가협회""",
    """나 는 다시 순자 를 양동 에서 빼내 기 위하 아서 창신 팔동""",
    """헤라신전 서 채화 되 ㄴ 지 보름 , 지구 의 반바퀴 를 돌 아 제주공항 에 첫발 을 내디디 ㄴ 이래 로 열이틀""",
    """다음 은 홍콩 의 권위지 명보 와 일본 도쿄 ( 동경 ) 신문 이 24일""",
    """최 영사 가 우리 외교관 이 며 그 신변보호 책임 이 주재국 이 ㄴ 러시아 에 있 다는 점 에서 러시아 는 이 같 은 우리 정부 요구 에 응하 아야 하 ㄹ 의무 가 있 다 .""",
    """판 에 박 은 듯 하 ㄴ 깨끗 하 ㄴ 글씨 로 , 처음 단군 님 이 니 신라 , 백제 , 고구려 이 니 띄엄띄엄 어른 들 한테 서 귀결 로 들어오 던 얘기 들 이 참말 로 씌 어 있 었 다 ."""
]

selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own Sentence!")

text_to_analyze = custom_input if custom_input else selected_text

st.subheader('Full example text')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)

# Initialize Spark and create pipeline
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, text_to_analyze)

# Display matched sentence
st.subheader("Processed output:")

results = {
    'Document': output[0]['document'][0].result,
    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
    'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
}

annotate(results)

with st.expander("View DataFrame"):
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)
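For a quick sanity check outside Streamlit, the same pipeline can be exercised directly. A minimal sketch, assuming spark-nlp and pyspark are installed (see requirements.txt below) and reusing the create_pipeline function defined in Demo.py above; the example sentence is one of the demo's own:

import sparknlp
from sparknlp.base import LightPipeline

# Start a Spark session with Spark NLP attached
spark = sparknlp.start()

# create_pipeline is the function from Demo.py above
pipeline = create_pipeline("ner_kmou_glove_840B_300d")

# Pretrained stages need no training data, so fit on an empty frame,
# then wrap the fitted model in a LightPipeline for fast single-string annotation
empty_df = spark.createDataFrame([['']]).toDF('text')
light = LightPipeline(pipeline.fit(empty_df))

annotations = light.fullAnnotate("서울대 건축공학 과 를 졸업 하 ㄴ 이 씨 는 한국건축가협회")[0]
for chunk in annotations['ner_chunk']:
    print(chunk.result, chunk.metadata['entity'])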
Dockerfile
ADDED
@@ -0,0 +1,70 @@

# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}

# Install required packages
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f;

# Install Python 3.8 and pip
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean;

# Set up JAVA_HOME
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
RUN mkdir -p ${HOME} \
    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
    && chown -R ${NB_UID}:${NB_UID} ${HOME}

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user
USER ${NB_USER}

# Set home and path variables for the user
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,220 @@

import streamlit as st

# Custom CSS for better styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .sub-title {
            font-size: 24px;
            color: #4A90E2;
            margin-top: 20px;
        }
        .section {
            background-color: #f9f9f9;
            padding: 15px;
            border-radius: 10px;
            margin-top: 20px;
        }
        .section h2 {
            font-size: 22px;
            color: #4A90E2;
        }
        .section p, .section ul {
            color: #666666;
        }
        .link {
            color: #4A90E2;
            text-decoration: none;
        }
    </style>
""", unsafe_allow_html=True)

# Main Title
st.markdown('<div class="main-title">Named Entity Recognition for Korean (GloVe 840B 300d)</div>', unsafe_allow_html=True)

# Description
st.markdown('<div class="sub-title">Description</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>This model annotates named entities in a text, which can be used to find features such as names of people, places, and organizations; predictions follow the BIO tagging format. The model does not read words directly but instead reads word embeddings, which represent words as points such that semantically similar words are closer together.</p>
  <p>This model uses the pre-trained <code>glove_840B_300</code> embeddings model from the WordEmbeddings annotator as an input, so be sure to use the same embeddings in the pipeline.</p>
</div>
""", unsafe_allow_html=True)

# Predicted Entities
st.markdown('<div class="sub-title">Predicted Entities</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <ul>
    <li>Dates-DT</li>
    <li>Locations-LC</li>
    <li>Organizations-OG</li>
    <li>Persons-PS</li>
    <li>Time-TI</li>
  </ul>
</div>
""", unsafe_allow_html=True)

# How to use
st.markdown('<div class="sub-title">How to use</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>To use this model, follow these steps in Python:</p>
</div>
""", unsafe_allow_html=True)
st.code("""
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from pyspark.sql.functions import col, expr

# Define the components of the pipeline
document_assembler = DocumentAssembler() \\
    .setInputCol("text") \\
    .setOutputCol("document")

sentence_detector = SentenceDetector() \\
    .setInputCols(["document"]) \\
    .setOutputCol("sentence")

word_segmenter = WordSegmenterModel.pretrained("wordseg_kaist_ud", "ko") \\
    .setInputCols(["sentence"]) \\
    .setOutputCol("token")

embeddings = WordEmbeddingsModel.pretrained("glove_840B_300", "xx") \\
    .setInputCols(["document", "token"]) \\
    .setOutputCol("embeddings")

ner = NerDLModel.pretrained("ner_kmou_glove_840B_300d", "ko") \\
    .setInputCols(["document", "token", "embeddings"]) \\
    .setOutputCol("ner")

ner_converter = NerConverter().setInputCols(["document", "token", "ner"]).setOutputCol("ner_chunk")

# Create the pipeline
pipeline = Pipeline(stages=[document_assembler, sentence_detector, word_segmenter, embeddings, ner, ner_converter])

# Create sample data (assumes an active Spark session, e.g. spark = sparknlp.start())
example = spark.createDataFrame([['라이프니츠 의 주도 로 베를린 에 세우 어 지 ㄴ 베를린 과학아카데미']], ["text"])

# Fit and transform data with the pipeline
result = pipeline.fit(example).transform(example)

# Select the result, entity
result.select(
    expr("explode(ner_chunk) as ner_chunk")
).select(
    col("ner_chunk.result").alias("chunk"),
    col("ner_chunk.metadata").getItem("entity").alias("ner_label")
).show(truncate=False)
""", language="python")

# Results
import pandas as pd

# Create the data for the DataFrame
data = {
    "token": ["라이프니츠", "베를린", "과학아카데미"],
    "ner": ["B-PS", "B-OG", "I-OG"]
}

# Creating the DataFrame
df = pd.DataFrame(data)
df.index += 1
st.dataframe(df)

# Model Information
st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>The <code>ner_kmou_glove_840B_300d</code> model details are as follows:</p>
  <ul>
    <li><strong>Model Name:</strong> ner_kmou_glove_840B_300d</li>
    <li><strong>Type:</strong> ner</li>
    <li><strong>Compatibility:</strong> Spark NLP 2.7.0+</li>
    <li><strong>License:</strong> Open Source</li>
    <li><strong>Edition:</strong> Official</li>
    <li><strong>Input Labels:</strong> [sentence, token, embeddings]</li>
    <li><strong>Output Labels:</strong> [ner]</li>
    <li><strong>Language:</strong> ko</li>
  </ul>
</div>
""", unsafe_allow_html=True)

# Data Source
st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>The model was trained on the Korea Maritime and Ocean University NLP dataset.</p>
</div>
""", unsafe_allow_html=True)

# Benchmarking
st.markdown('<div class="sub-title">Benchmarking</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>Evaluating the performance of NER models is crucial to understanding their effectiveness in real-world applications. Below are the benchmark results for the <code>ner_kmou_glove_840B_300d</code> model across the named entity categories. The metrics used are precision, recall, and F1-score, which are standard for evaluating classification models.</p>
</div>
""", unsafe_allow_html=True)
st.markdown("""
---
| ner_tag      | precision | recall | f1-score | support |
|:------------:|:---------:|:------:|:--------:|:-------:|
| B-DT         | 0.95      | 0.29   | 0.44     | 132     |
| B-LC         | 0.00      | 0.00   | 0.00     | 166     |
| B-OG         | 1.00      | 0.06   | 0.11     | 149     |
| B-PS         | 0.86      | 0.13   | 0.23     | 287     |
| B-TI         | 0.50      | 0.05   | 0.09     | 20      |
| I-DT         | 0.94      | 0.36   | 0.52     | 164     |
| I-LC         | 0.00      | 0.00   | 0.00     | 4       |
| I-OG         | 1.00      | 0.08   | 0.15     | 25      |
| I-PS         | 1.00      | 0.08   | 0.15     | 12      |
| I-TI         | 0.50      | 0.10   | 0.17     | 10      |
| O            | 0.94      | 1.00   | 0.97     | 12830   |
| accuracy     |           |        | 0.94     | 13799   |
| macro avg    | 0.70      | 0.20   | 0.26     | 13799   |
| weighted avg | 0.93      | 0.94   | 0.92     | 13799   |
""", unsafe_allow_html=True)

st.markdown("""
<div class="section">
  <p>These results show the model's ability to identify and classify named entities in Korean text. Precision measures the accuracy of the positive predictions, recall measures the model's ability to find all relevant instances, and the F1-score balances precision and recall.</p>
</div>
""", unsafe_allow_html=True)

# Conclusion/Summary
st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <p>The <code>ner_kmou_glove_840B_300d</code> model demonstrates effective named entity recognition in Korean texts, with varied performance across entity types. It leverages <code>glove_840B_300</code> embeddings to improve accuracy in identifying entities such as persons, locations, and organizations. Its integration into Spark NLP allows efficient, scalable processing of Korean text data, making it a valuable tool for researchers and developers working on Korean language applications.</p>
</div>
""", unsafe_allow_html=True)

# References
st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <ul>
    <li><a class="link" href="https://sparknlp.org/api/python/reference/autosummary/sparknlp/annotator/ner/ner_dl/index.html" target="_blank" rel="noopener">NerDLModel</a> annotator documentation</li>
    <li>Model used: <a class="link" href="https://sparknlp.org/2021/01/03/ner_ud_kaist_glove_840B_300d_ko.html" rel="noopener">ner_kmou_glove_840B_300d</a></li>
    <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
    <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
  </ul>
</div>
""", unsafe_allow_html=True)

# Community & Support
st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
  <ul>
    <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
    <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub Repository</a>: Report issues or contribute</li>
    <li><a class="link" href="https://forum.johnsnowlabs.com/" target="_blank">Community Forum</a>: Ask questions, share ideas, and get support</li>
  </ul>
</div>
""", unsafe_allow_html=True)
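Since the overview page above explains precision, recall, and F1 in prose, a small illustrative sketch (not part of the uploaded files) shows how the F1 values in the benchmark table follow from precision and recall as their harmonic mean:

# F1 is the harmonic mean of precision and recall
def f1(precision: float, recall: float) -> float:
    if precision + recall == 0.0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

print(round(f1(0.95, 0.29), 2))  # B-DT row of the table above -> 0.44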
requirements.txt
ADDED
@@ -0,0 +1,6 @@

streamlit
st-annotated-text
pandas
numpy
spark-nlp
pyspark