abdullahmubeen10 committed · verified
Commit 263259c · 1 Parent(s): 1127617

Upload 5 files
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
+ [theme]
+ base="light"
+ primaryColor="#29B4E8"
Demo.py ADDED
@@ -0,0 +1,161 @@
+ import streamlit as st
+ import sparknlp
+ import os
+ import pandas as pd
+
+ from sparknlp.base import *
+ from sparknlp.annotator import *
+ from pyspark.ml import Pipeline
+ from sparknlp.pretrained import PretrainedPipeline
+ from annotated_text import annotated_text
+
+ # Page configuration
+ st.set_page_config(
+     layout="wide",
+     initial_sidebar_state="auto"
+ )
+
+ # CSS for styling
+ st.markdown("""
+     <style>
+         .main-title {
+             font-size: 36px;
+             color: #4A90E2;
+             font-weight: bold;
+             text-align: center;
+         }
+         .section {
+             background-color: #f9f9f9;
+             padding: 10px;
+             border-radius: 10px;
+             margin-top: 10px;
+         }
+         .section p, .section ul {
+             color: #666666;
+         }
+     </style>
+ """, unsafe_allow_html=True)
+
+ @st.cache_resource
+ def init_spark():
+     return sparknlp.start()
+
+ @st.cache_resource
+ def create_pipeline(model):
+     document_assembler = DocumentAssembler() \
+         .setInputCol("text") \
+         .setOutputCol("document")
+
+     sentence_detector = SentenceDetector() \
+         .setInputCols(["document"]) \
+         .setOutputCol("sentence")
+
+     word_segmenter = WordSegmenterModel.pretrained("wordseg_large", "zh") \
+         .setInputCols(["sentence"]) \
+         .setOutputCol("token")
+
+     embeddings = BertEmbeddings.pretrained(name='bert_base_chinese', lang='zh') \
+         .setInputCols(["document", "token"]) \
+         .setOutputCol("embeddings")
+
+     ner = NerDLModel.pretrained(model, "zh") \
+         .setInputCols(["document", "token", "embeddings"]) \
+         .setOutputCol("ner")
+
+     ner_converter = NerConverter() \
+         .setInputCols(["sentence", "token", "ner"]) \
+         .setOutputCol("entities")
+
+     pipeline = Pipeline(stages=[document_assembler, sentence_detector, word_segmenter, embeddings, ner, ner_converter])
+     return pipeline
+
+ def fit_data(pipeline, data):
+     empty_df = spark.createDataFrame([['']]).toDF('text')
+     pipeline_model = pipeline.fit(empty_df)
+     model = LightPipeline(pipeline_model)
+     result = model.fullAnnotate(data)
+     return result
+
+ def annotate(data):
+     document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+     annotated_words = []
+     for chunk, label in zip(chunks, labels):
+         parts = document.split(chunk, 1)
+         if parts[0]:
+             annotated_words.append(parts[0])
+         annotated_words.append((chunk, label))
+         document = parts[1]
+     if document:
+         annotated_words.append(document)
+     annotated_text(*annotated_words)
+
+ # Set up the page layout
+ st.markdown('<div class="main-title">Recognize entities in Chinese text</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>This demo uses embeddings-based NER models for Chinese text, built on the bert_base_chinese word embeddings.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Sidebar content
+ model = st.sidebar.selectbox(
+     "Choose the pretrained model",
+     ["ner_msra_bert_768d", "ner_weibo_bert_768d"],
+     help="For more info about the models visit: https://sparknlp.org/models"
+ )
+
+ # Reference notebook link in sidebar
+ link = """
+ <a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER.ipynb">
+     <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+ </a>
+ """
+ st.sidebar.markdown('Reference notebook:')
+ st.sidebar.markdown(link, unsafe_allow_html=True)
+
+ # Load examples
+ examples = [
+     "当 前 , 在 中共十五大 精 神 的 指 引 下 , 在 以 江泽民 同 志 为 核 心 的 中共中央 领 导 下 , 全 党 和 全 国 各 族 人 民 正 高 举 邓小平 理 论 伟 大 旗 帜 , 同 心 同 德 , 团 结 奋 斗 , 沿 着 建 设 有 中国 特 色 的 社 会 主 义 道 路 阔 步 前 进 。",
+     "中共中央 致 中国致公党十一大 的 贺 词 各 位 代 表 、 各 位 同 志 : 在 中国致公党第十一次全国代表大会 隆 重 召 开 之 际 , 中国共产党中央委员会 谨 向 大 会 表 示 热 烈 的 祝 贺 , 向 致公党 的 同 志 们 致 以 亲 切 的 问 候 !",
+     "数 百 名 华 人 、 华 侨 、 留 学 人 员 、 我 国 驻 纽约 总 领 事 馆 代 表 在 机 场 挥 舞 中 美 两 国 国 旗 , 热 烈 欢 迎 江 主 席 访 问 波士顿 。",
+     "到 机 场 迎 接 江 主 席 的 美 方 人 员 有 马萨诸塞州 州 长 和 波士顿 市 长 等 。",
+     "又 讯 中国 国 家 主 席 江泽民 1 日 上 午 应 邀 在 美国 著 名 学 府 哈佛大学 发 表 重 要 演 讲 。",
+     "江 主 席 来 到 哈佛大学 时 , 受 到 哈佛大学 校 长 陆登庭 及 哈佛 各 学 院 院 长 的 热 烈 欢 迎 。",
+     "本 报 纽约 1 0 月 3 1 日 电 记 者 陈特�� 、 周德武 报 道 : 今 天 晚 上 , 美中贸易全国委员会 和 美国中国商会 在 纽约 举 行 盛 大 宴 会 欢 迎 江泽民 主 席 。",
+     "哈佛大学 校 长 陆登庭 对 江 主 席 访 问 哈佛 并 发 表 演 讲 表 示 欢 迎 。",
+     "美中贸易全国委员会 主 席 费希尔 和 美国中国商会 会 长 沈被章 先 后 致 词 。"
+ ]
+
+ selected_text = st.selectbox("Select an example", examples)
+ custom_input = st.text_input("Try it with your own Sentence!")
+
+ text_to_analyze = custom_input if custom_input else selected_text
+
+ st.subheader('Full example text')
+ HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+ st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+ # Initialize Spark and create pipeline
+ spark = init_spark()
+ pipeline = create_pipeline(model)
+ output = fit_data(pipeline, text_to_analyze)
+
+ # Display processed output
+ st.subheader("Processed output:")
+
+ results = {
+     'Document': output[0]['document'][0].result,
+     'NER Chunk': [n.result for n in output[0]['entities']],
+     'NER Label': [n.metadata['entity'] for n in output[0]['entities']]
+ }
+
+ annotate(results)
+
+ with st.expander("View DataFrame"):
+     df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+     df.index += 1
+     st.dataframe(df)
+
+
+
+
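Note: the `annotate` helper above builds the alternating sequence of plain strings and `(chunk, label)` tuples that the `st-annotated-text` component expects. Below is a minimal standalone sketch of that logic; the sample sentence and labels are invented for illustration, and the snippet has to run inside a Streamlit app.

```python
from annotated_text import annotated_text

# Hypothetical result dict in the same shape that Demo.py builds from the pipeline output.
results = {
    "Document": "马云在杭州市出生。",
    "NER Chunk": ["马云", "杭州市"],
    "NER Label": ["PER", "LOC"],
}

document = results["Document"]
annotated_words = []
for chunk, label in zip(results["NER Chunk"], results["NER Label"]):
    # Split off the text before the chunk, keep it as plain text,
    # then tag the chunk itself with its NER label.
    before, document = document.split(chunk, 1)
    if before:
        annotated_words.append(before)
    annotated_words.append((chunk, label))
if document:
    annotated_words.append(document)

# annotated_text accepts a mix of plain strings and (text, label) tuples.
annotated_text(*annotated_words)
```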
Dockerfile ADDED
@@ -0,0 +1,70 @@
+ # Download base image ubuntu 18.04
+ FROM ubuntu:18.04
+
+ # Set environment variables
+ ENV NB_USER jovyan
+ ENV NB_UID 1000
+ ENV HOME /home/${NB_USER}
+
+ # Install required packages
+ RUN apt-get update && apt-get install -y \
+     tar \
+     wget \
+     bash \
+     rsync \
+     gcc \
+     libfreetype6-dev \
+     libhdf5-serial-dev \
+     libpng-dev \
+     libzmq3-dev \
+     python3 \
+     python3-dev \
+     python3-pip \
+     unzip \
+     pkg-config \
+     software-properties-common \
+     graphviz \
+     openjdk-8-jdk \
+     ant \
+     ca-certificates-java \
+     && apt-get clean \
+     && update-ca-certificates -f;
+
+ # Install Python 3.8 and pip
+ RUN add-apt-repository ppa:deadsnakes/ppa \
+     && apt-get update \
+     && apt-get install -y python3.8 python3-pip \
+     && apt-get clean;
+
+ # Set up JAVA_HOME
+ ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+ RUN mkdir -p ${HOME} \
+     && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
+     && chown -R ${NB_UID}:${NB_UID} ${HOME}
+
+ # Create a new user named "jovyan" with user ID 1000
+ RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+ # Switch to the "jovyan" user
+ USER ${NB_USER}
+
+ # Set home and path variables for the user
+ ENV HOME=/home/${NB_USER} \
+     PATH=/home/${NB_USER}/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR ${HOME}
+
+ # Upgrade pip and install Python dependencies
+ RUN python3.8 -m pip install --upgrade pip
+ COPY requirements.txt /tmp/requirements.txt
+ RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+ # Copy the application code into the container at /home/jovyan
+ COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+ # Expose port for Streamlit
+ EXPOSE 7860
+
+ # Define the entry point for the container
+ ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
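Note: once this image is built, a quick way to confirm that the Java runtime and Spark NLP are wired up correctly is a short sanity check inside the container. This is only a sketch; the script name `check_env.py` is hypothetical.

```python
# check_env.py - hypothetical sanity check for the container environment
import sparknlp

# sparknlp.start() launches a local SparkSession preconfigured for Spark NLP;
# it relies on the Java runtime installed in the Dockerfile (openjdk-8-jdk).
spark = sparknlp.start()

print("Spark NLP version:", sparknlp.version())
print("Apache Spark version:", spark.version)
```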
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,215 @@
+ import streamlit as st
+
+ # Custom CSS for better styling
+ st.markdown("""
+     <style>
+         .main-title {
+             font-size: 36px;
+             color: #4A90E2;
+             font-weight: bold;
+             text-align: center;
+         }
+         .sub-title {
+             font-size: 24px;
+             color: #4A90E2;
+             margin-top: 20px;
+         }
+         .section {
+             background-color: #f9f9f9;
+             padding: 15px;
+             border-radius: 10px;
+             margin-top: 20px;
+         }
+         .section h2 {
+             font-size: 22px;
+             color: #4A90E2;
+         }
+         .section p, .section ul {
+             color: #666666;
+         }
+         .link {
+             color: #4A90E2;
+             text-decoration: none;
+         }
+     </style>
+ """, unsafe_allow_html=True)
+
+ # Main Title
+ st.markdown('<div class="main-title">Named Entity Recognition for Chinese (BERT-MSRA Dataset)</div>', unsafe_allow_html=True)
+
+ # Description
+ st.markdown('<div class="sub-title">Description</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>This model annotates named entities in a text, which can be used to find features such as names of people, places, and organizations. The model does not read words directly but instead reads word embeddings, which represent words as points such that more semantically similar words are closer together.</p>
+ <p>This model uses the pre-trained <code>bert_base_chinese</code> embeddings model from BertEmbeddings annotator as an input, so be sure to use the same embeddings in the pipeline.</p>
46
+ </div>
47
+ """, unsafe_allow_html=True)
48
+
49
+ # Predicted Entities
50
+ st.markdown('<div class="sub-title">Predicted Entities</div>', unsafe_allow_html=True)
51
+ st.markdown("""
52
+ <div class="section">
53
+ <ul>
54
+ <li>Persons-PER</li>
55
+ <li>Locations-LOC</li>
56
+ <li>Organizations-ORG</li>
57
+ </ul>
58
+ </div>
59
+ """, unsafe_allow_html=True)
60
+
61
+ # How to use
62
+ st.markdown('<div class="sub-title">How to use</div>', unsafe_allow_html=True)
63
+ st.markdown("""
64
+ <div class="section">
65
+ <p>To use this model, follow these steps in Python:</p>
66
+ </div>
67
+ """, unsafe_allow_html=True)
68
+ st.code("""
69
+ from sparknlp.base import *
70
+ from sparknlp.annotator import *
71
+ from pyspark.ml import Pipeline
72
+
73
+ # Define the components of the pipeline
74
+ document_assembler = DocumentAssembler() \\
75
+ .setInputCol("text") \\
76
+ .setOutputCol("document")
77
+
78
+ sentence_detector = SentenceDetector() \\
79
+ .setInputCols(["document"]) \\
80
+ .setOutputCol("sentence")
81
+
82
+ word_segmenter = WordSegmenterModel.pretrained("wordseg_large", "zh") \\
83
+ .setInputCols(["sentence"]) \\
84
+ .setOutputCol("token")
85
+
86
+ embeddings = BertEmbeddings.pretrained(name='bert_base_chinese', lang='zh') \\
87
+ .setInputCols(["document", "token"]) \\
88
+ .setOutputCol("embeddings")
89
+
90
+ ner = NerDLModel.pretrained("ner_msra_bert_768d", "zh") \\
91
+ .setInputCols(["document", "token", "embeddings"]) \\
92
+ .setOutputCol("ner")
93
+
94
+ ner_converter = NerConverter() \\
95
+ .setInputCols(["sentence", "token", "ner"]) \\
96
+ .setOutputCol("entities")
97
+
98
+ # Create the pipeline
99
+ pipeline = Pipeline(stages=[document_assembler, sentence_detector, word_segmenter, embeddings, ner, ner_converter])
100
+
101
+ # Create sample data
102
+ example = spark.createDataFrame([['马云在浙江省杭州市出生,是阿里巴巴集团的主要创始人。']], ["text"])
103
+
104
+ # Fit and transform data with the pipeline
105
+ result = pipeline.fit(example).transform(example)
106
+
107
+ # Select the result, entity
108
+ result.select(
109
+ expr("explode(entities) as entity")
110
+ ).select(
111
+ col("entity.result").alias("chunk"),
112
+ col("entity.metadata").getItem("entity").alias("ner_label")
113
+ ).show(truncate=False)
114
+ """, language="python")
115
+
116
+ # Results
117
+ import pandas as pd
118
+
119
+ # Create the data for the DataFrame
120
+ data = {
121
+ "token": ["马云", "浙江省", "杭州市", "出生", "阿里巴巴集团", "创始人"],
122
+ "ner": ["PER", "LOC", "LOC", "ORG", "ORG", "PER"]
123
+ }
124
+
125
+ # Creating the DataFrame
126
+ df = pd.DataFrame(data)
127
+ df.index += 1
128
+ st.dataframe(df)
129
+
130
+ # Model Information
131
+ st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
132
+ st.markdown("""
133
+ <div class="section">
134
+ <p>The <code>ner_msra_bert_768d</code> model details are as follows:</p>
135
+ <ul>
136
+ <li><strong>Model Name:</strong> ner_msra_bert_768d</li>
137
+ <li><strong>Type:</strong> ner</li>
138
+ <li><strong>Compatibility:</strong> Spark NLP 2.7.0+</li>
139
+ <li><strong>License:</strong> Open Source</li>
140
+ <li><strong>Edition:</strong> Official</li>
141
+ <li><strong>Input Labels:</strong> [sentence, token, embeddings]</li>
142
+ <li><strong>Output Labels:</strong> [ner]</li>
143
+ <li><strong>Language:</strong> zh</li>
144
+ </ul>
145
+ </div>
146
+ """, unsafe_allow_html=True)
147
+
148
+ # Data Source
149
+ st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
150
+ st.markdown("""
151
+ <div class="section">
152
+ <p>The model was trained on the MSRA (Levow, 2006) data set created by “Microsoft Research Asia”.</p>
153
+ </div>
154
+ """, unsafe_allow_html=True)
155
+
156
+ # Benchmarking
157
+ st.markdown('<div class="sub-title">Benchmarking</div>', unsafe_allow_html=True)
158
+ st.markdown("""
159
+ <div class="section">
160
+ <p>Evaluating the performance of NER models is crucial to understanding their effectiveness in real-world applications. Below are the benchmark results for the <code>ner_msra_bert_768d</code> model, focusing on various named entity categories. The metrics used include precision, recall, and F1-score, which are standard for evaluating classification models.</p>
161
+ </div>
162
+ """, unsafe_allow_html=True)
163
+ st.markdown("""
164
+ ---
165
+ | ner_tag | precision | recall | f1-score | support |
166
+ |--------------|-----------|--------|----------|---------|
167
+ | LOC | 0.97 | 0.97 | 0.97 | 2777 |
168
+ | O | 1.00 | 1.00 | 1.00 | 146826 |
169
+ | ORG | 0.88 | 0.99 | 0.93 | 1292 |
170
+ | PER | 0.97 | 0.97 | 0.97 | 1430 |
171
+ | accuracy | 1.00 | 152325 | | |
172
+ | macro avg | 0.95 | 0.98 | 0.97 | 152325 |
173
+ | weighted avg | 1.00 | 1.00 | 1.00 | 152325 |
174
+ ---
175
+ """, unsafe_allow_html=True)
176
+
177
+ st.markdown("""
178
+ <div class="section">
179
+ <p>These results demonstrate the model's ability to accurately identify and classify named entities in Chinese text. Precision measures the accuracy of the positive predictions, recall measures the model's ability to find all relevant instances, and F1-score provides a balance between precision and recall.</p>
180
+ </div>
181
+ """, unsafe_allow_html=True)
182
+
183
+ # Conclusion/Summary
184
+ st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
185
+ st.markdown("""
186
+ <div class="section">
187
+ <p>The <code>ner_msra_bert_768d</code> model demonstrates effective named entity recognition in Chinese texts, with high performance metrics across different entity types. This model leverages <code>bert_base_chinese</code> embeddings to enhance its understanding and accuracy in identifying entities like persons, locations, and organizations. Its integration into Spark NLP allows for efficient and scalable processing of Chinese text data, making it a valuable tool for researchers and developers working with Chinese language applications.</p>
188
+ </div>
189
+ """, unsafe_allow_html=True)
190
+
191
+ # References
192
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
193
+ st.markdown("""
194
+ <div class="section">
195
+ <ul>
196
+ <li><a class="link" href="https://sparknlp.org/api/python/reference/autosummary/sparknlp/annotator/ner/ner_dl/index.html" target="_blank" rel="noopener">NerDLModel</a> annotator documentation</li>
197
+ <li>Model Used: <a class="link" href="https://sparknlp.org/2021/01/03/ner_msra_bert_768d_zh.html" rel="noopener">ner_msra_bert_768d_zh</a></li>
198
+ <li>The model was trained on the <a class="link" href="https://www.aclweb.org/anthology/W06-0115/">MSRA (Levow, 2006)</a> data set created by “Microsoft Research Asia”.</li>
199
+ <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
200
+ <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
201
+ </ul>
202
+ </div>
203
+ """, unsafe_allow_html=True)
204
+
205
+ # Community & Support
206
+ st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
207
+ st.markdown("""
208
+ <div class="section">
209
+ <ul>
210
+ <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
211
+ <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub Repository</a>: Report issues or contribute</li>
212
+ <li><a class="link" href="https://forum.johnsnowlabs.com/" target="_blank">Community Forum</a>: Ask questions, share ideas, and get support</li>
213
+ </ul>
214
+ </div>
215
+ """, unsafe_allow_html=True)
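Note: the precision, recall, and F1 figures quoted in the benchmarking table follow the standard definitions. A minimal sketch of how they are derived per entity tag from true-positive, false-positive, and false-negative counts is below; the counts are illustrative placeholders, not the MSRA evaluation data.

```python
# Per-tag precision, recall, and F1 computed from raw counts.
# The counts below are illustrative placeholders, not the MSRA benchmark data.
counts = {
    "LOC": {"tp": 2700, "fp": 80, "fn": 77},
    "ORG": {"tp": 1280, "fp": 170, "fn": 12},
    "PER": {"tp": 1390, "fp": 40, "fn": 40},
}

for tag, c in counts.items():
    precision = c["tp"] / (c["tp"] + c["fp"])   # how many predicted entities were correct
    recall = c["tp"] / (c["tp"] + c["fn"])      # how many gold entities were found
    f1 = 2 * precision * recall / (precision + recall)  # harmonic mean of the two
    print(f"{tag}: precision={precision:.2f} recall={recall:.2f} f1={f1:.2f}")
```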
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ streamlit
+ st-annotated-text
+ pandas
+ numpy
+ spark-nlp
+ pyspark