abdullahmubeen10 committed (verified)
Commit adb4d87 · 1 Parent(s): 3f43a5b

Upload 12 files

.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
+ [theme]
+ base="light"
+ primaryColor="#29B4E8"
Demo.py ADDED
@@ -0,0 +1,161 @@
+ import streamlit as st
+ import sparknlp
+ import os
+ import pandas as pd
+
+ from sparknlp.base import *
+ from sparknlp.annotator import *
+ from pyspark.ml import Pipeline
+ from sparknlp.pretrained import PretrainedPipeline
+ from annotated_text import annotated_text
+
+ # Page configuration
+ st.set_page_config(
+     layout="wide",
+     initial_sidebar_state="auto"
+ )
+
+ # CSS for styling
+ st.markdown("""
+ <style>
+     .main-title {
+         font-size: 36px;
+         color: #4A90E2;
+         font-weight: bold;
+         text-align: center;
+     }
+     .section {
+         background-color: #f9f9f9;
+         padding: 10px;
+         border-radius: 10px;
+         margin-top: 10px;
+     }
+     .section p, .section ul {
+         color: #666666;
+     }
+ </style>
+ """, unsafe_allow_html=True)
+
+ @st.cache_resource
+ def init_spark():
+     return sparknlp.start()
+
+ @st.cache_resource
+ def create_pipeline(model):
+     document_assembler = DocumentAssembler() \
+         .setInputCol("text") \
+         .setOutputCol("document")
+
+     sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "en") \
+         .setInputCols(["document"]) \
+         .setOutputCol("sentence")
+
+     tokenizer = Tokenizer() \
+         .setInputCols(["sentence"]) \
+         .setOutputCol("token")
+
+     # Load the selected BTC token classifier; its BERT embeddings are
+     # built in, so no separate WordEmbeddingsModel stage is needed.
+     token_classifier = BertForTokenClassification.pretrained(model, "en") \
+         .setInputCols(["sentence", "token"]) \
+         .setOutputCol("ner") \
+         .setCaseSensitive(True)
+
+     ner_converter = NerConverter() \
+         .setInputCols(["sentence", "token", "ner"]) \
+         .setOutputCol("ner_chunk")
+
+     pipeline = Pipeline(stages=[
+         document_assembler,
+         sentence_detector,
+         tokenizer,
+         token_classifier,
+         ner_converter
+     ])
+     return pipeline
+
+ def fit_data(pipeline, data):
+     # Fit on an empty DataFrame once, then wrap the PipelineModel in a
+     # LightPipeline for fast single-string annotation.
+     empty_df = spark.createDataFrame([['']]).toDF('text')
+     pipeline_model = pipeline.fit(empty_df)
+     model = LightPipeline(pipeline_model)
+     result = model.fullAnnotate(data)
+     return result
+
+ def annotate(data):
+     # Interleave plain text and (chunk, label) tuples so annotated_text
+     # renders recognized entities as highlighted spans.
+     document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+     annotated_words = []
+     for chunk, label in zip(chunks, labels):
+         parts = document.split(chunk, 1)
+         if parts[0]:
+             annotated_words.append(parts[0])
+         annotated_words.append((chunk, label))
+         document = parts[1]
+     if document:
+         annotated_words.append(document)
+     annotated_text(*annotated_words)
+
+ # Set up the page layout
+ st.markdown('<div class="main-title">This model detects persons, locations, and organizations in tweets.</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>This app uses the <strong>bert_token_classifier_ner_btc</strong> model, trained on the Broad Twitter Corpus (BTC) dataset to detect entities with high accuracy. The model builds on BERT base-cased embeddings, which are integrated into the model itself, eliminating the need for a separate embeddings stage in the NLP pipeline.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Sidebar content
+ model = st.sidebar.selectbox(
+     "Choose the pretrained model",
+     ["bert_token_classifier_ner_btc"],
+     help="For more info about the models visit: https://sparknlp.org/models"
+ )
+
+ # Reference notebook link in sidebar
+ link = """
+ <a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_BTC.ipynb">
+     <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+ </a>
+ """
+ st.sidebar.markdown('Reference notebook:')
+ st.sidebar.markdown(link, unsafe_allow_html=True)
+
+ # Load examples: each input file stores a truncated preview on its first
+ # line and the full example text on its second line.
+ folder_path = f"inputs/{model}"
+ examples = [
+     lines[1].strip()
+     for filename in os.listdir(folder_path)
+     if filename.endswith('.txt')
+     for lines in [open(os.path.join(folder_path, filename), 'r', encoding='utf-8').readlines()]
+     if len(lines) >= 2
+ ]
+
+ selected_text = st.selectbox("Select an example", examples)
+ custom_input = st.text_input("Try it with your own sentence!")
+
+ text_to_analyze = custom_input if custom_input else selected_text
+
+ st.subheader('Full example text')
+ HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+ st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+ # Initialize Spark and create the pipeline
+ spark = init_spark()
+ pipeline = create_pipeline(model)
+ output = fit_data(pipeline, text_to_analyze)
+
+ # Display the annotated output
+ st.subheader("Processed output:")
+
+ results = {
+     'Document': output[0]['document'][0].result,
+     'NER Chunk': [n.result for n in output[0]['ner_chunk']],
+     'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
+ }
+
+ annotate(results)
+
+ with st.expander("View DataFrame"):
+     df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+     df.index += 1
+     st.dataframe(df)
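
A minimal sketch of the input structure annotate() expects, using hypothetical values rather than real pipeline output (the keys mirror the results dict built above):

    # Hypothetical sample; real values come from fullAnnotate() output.
    sample = {
        "Document": "Dominic Lippa is working at Pentagram in London.",
        "NER Chunk": ["Dominic Lippa", "Pentagram", "London"],
        "NER Label": ["PER", "ORG", "LOC"],
    }
    annotate(sample)  # renders the text with the three chunks highlighted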
Dockerfile ADDED
@@ -0,0 +1,70 @@
+ # Use Ubuntu 18.04 as the base image
+ FROM ubuntu:18.04
+
+ # Set environment variables
+ ENV NB_USER jovyan
+ ENV NB_UID 1000
+ ENV HOME /home/${NB_USER}
+
+ # Install required packages
+ RUN apt-get update && apt-get install -y \
+     tar \
+     wget \
+     bash \
+     rsync \
+     gcc \
+     libfreetype6-dev \
+     libhdf5-serial-dev \
+     libpng-dev \
+     libzmq3-dev \
+     python3 \
+     python3-dev \
+     python3-pip \
+     unzip \
+     pkg-config \
+     software-properties-common \
+     graphviz \
+     openjdk-8-jdk \
+     ant \
+     ca-certificates-java \
+     && apt-get clean \
+     && update-ca-certificates -f
+
+ # Install Python 3.8 from the deadsnakes PPA
+ RUN add-apt-repository ppa:deadsnakes/ppa \
+     && apt-get update \
+     && apt-get install -y python3.8 python3-pip \
+     && apt-get clean
+
+ # Set up JAVA_HOME
+ ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+ RUN mkdir -p ${HOME} \
+     && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
+     && chown -R ${NB_UID}:${NB_UID} ${HOME}
+
+ # Create a new user named "jovyan" with user ID 1000
+ RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+ # Switch to the "jovyan" user
+ USER ${NB_USER}
+
+ # Set home and path variables for the user
+ ENV HOME=/home/${NB_USER} \
+     PATH=/home/${NB_USER}/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR ${HOME}
+
+ # Upgrade pip and install Python dependencies
+ RUN python3.8 -m pip install --upgrade pip
+ COPY requirements.txt /tmp/requirements.txt
+ RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+ # Copy the application code into the container at /home/jovyan
+ COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+ # Expose the Streamlit port
+ EXPOSE 7860
+
+ # Define the entry point for the container
+ ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
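
With this Dockerfile, the app can also be run locally in the usual way, for example docker build -t ner-btc-demo . followed by docker run -p 7860:7860 ner-btc-demo (the image tag here is arbitrary); the ENTRYPOINT then serves Demo.py on port 7860.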
inputs/bert_token_classifier_ner_btc/Example1.txt ADDED
@@ -0,0 +1,2 @@
+ Wengers big mistakes is not being ruthless enough with ba...
+ Wengers big mistakes is not being ruthless enough with bad players.
inputs/bert_token_classifier_ner_btc/Example2.txt ADDED
@@ -0,0 +1,2 @@
+ Aguero goal . From being someone previously so reliable , he 's been terrible t...
+ Aguero goal . From being someone previously so reliable , he 's been terrible this year .
inputs/bert_token_classifier_ner_btc/Example3.txt ADDED
@@ -0,0 +1,2 @@
+ Paul Scholes approached Alex Ferguson about making a comeback . Ferguson clearly only too happy to a...
+ Paul Scholes approached Alex Ferguson about making a comeback . Ferguson clearly only too happy to accommodate him .
inputs/bert_token_classifier_ner_btc/Example4.txt ADDED
@@ -0,0 +1,2 @@
+ Wikipedia today , as soon as you load the website , hit ESC to prevent the 'blackout ' fro...
+ Wikipedia today , as soon as you load the website , hit ESC to prevent the 'blackout ' from loading.
inputs/bert_token_classifier_ner_btc/Example5.txt ADDED
@@ -0,0 +1,2 @@
+ David Attenborough shows us a duck billed...
+ David Attenborough shows us a duck billed platypus.
inputs/bert_token_classifier_ner_btc/Example6.txt ADDED
@@ -0,0 +1,2 @@
+ London GET UPDATES FROM P...
+ London GET UPDATES FROM Peter Hotez
inputs/bert_token_classifier_ner_btc/Example7.txt ADDED
@@ -0,0 +1,2 @@
+ Pentagram's Dominic Lippa is working on a new identity for University of Ar...
+ Pentagram's Dominic Lippa is working on a new identity for University of Arts London
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,306 @@
+ import streamlit as st
+
+ # Custom CSS for better styling
+ st.markdown("""
+ <style>
+     .main-title {
+         font-size: 36px;
+         color: #4A90E2;
+         font-weight: bold;
+         text-align: center;
+     }
+     .sub-title {
+         font-size: 24px;
+         color: #4A90E2;
+         margin-top: 20px;
+     }
+     .section {
+         background-color: #f9f9f9;
+         padding: 15px;
+         border-radius: 10px;
+         margin-top: 20px;
+     }
+     .section h2 {
+         font-size: 22px;
+         color: #4A90E2;
+     }
+     .section p, .section ul {
+         color: #666666;
+     }
+     .link {
+         color: #4A90E2;
+         text-decoration: none;
+     }
+     .benchmark-table {
+         width: 100%;
+         border-collapse: collapse;
+         margin-top: 20px;
+     }
+     .benchmark-table th, .benchmark-table td {
+         border: 1px solid #ddd;
+         padding: 8px;
+         text-align: left;
+     }
+     .benchmark-table th {
+         background-color: #4A90E2;
+         color: white;
+     }
+     .benchmark-table td {
+         background-color: #f2f2f2;
+     }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Main Title
+ st.markdown('<div class="main-title">Detect Entities in Twitter Texts</div>', unsafe_allow_html=True)
+
+ # Description
+ st.markdown("""
+ <div class="section">
+     <p><strong>Detect Entities in Twitter Texts</strong> is a specialized NLP task focused on identifying named entities in Twitter-based text. This app uses the <strong>bert_token_classifier_ner_btc</strong> model, which is trained on the Broad Twitter Corpus (BTC) dataset to detect entities with high accuracy. The model builds on BERT base-cased embeddings, which are integrated into the model itself, eliminating the need for a separate embeddings stage in the NLP pipeline.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # What is Entity Recognition
+ st.markdown('<div class="sub-title">What is Entity Recognition?</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p><strong>Entity Recognition</strong> is a Natural Language Processing (NLP) task that identifies and classifies named entities in text into predefined categories. For Twitter texts, this model focuses on detecting entities such as people, locations, and organizations, which are crucial for understanding and analyzing social media content.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Model Importance and Applications
+ st.markdown('<div class="sub-title">Model Importance and Applications</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>The <strong>bert_token_classifier_ner_btc</strong> model is highly effective for extracting named entities from Twitter texts. Its applications include:</p>
+     <ul>
+         <li><strong>Social Media Monitoring:</strong> Identify and track mentions of people, organizations, and locations in social media posts, which is valuable for sentiment analysis and brand monitoring.</li>
+         <li><strong>Event Detection:</strong> By recognizing key entities, the model helps detect and summarize events discussed on Twitter, such as breaking news or trending topics.</li>
+         <li><strong>Market Research:</strong> Companies can analyze customer opinions and identify trends related to their products or services based on entity mentions.</li>
+         <li><strong>Content Classification:</strong> Categorize Twitter content based on the detected entities, which is useful for organizing and filtering large volumes of social media data.</li>
+     </ul>
+     <p>Why use the <strong>bert_token_classifier_ner_btc</strong> model?</p>
+     <ul>
+         <li><strong>Pre-trained on the BTC Dataset:</strong> The model is trained specifically on Twitter data, making it well suited to social media text.</li>
+         <li><strong>Integrated BERT Embeddings:</strong> It uses BERT base-cased embeddings, providing strong performance without an additional embedding component.</li>
+         <li><strong>High Accuracy:</strong> The model achieves strong precision and recall, ensuring reliable entity detection.</li>
+         <li><strong>Ease of Use:</strong> It simplifies entity recognition with minimal setup required.</li>
+     </ul>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Predicted Entities
+ st.markdown('<div class="sub-title">Predicted Entities</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <ul>
+         <li><strong>PER:</strong> Person names.</li>
+         <li><strong>LOC:</strong> Locations or places.</li>
+         <li><strong>ORG:</strong> Organization or company names.</li>
+     </ul>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # How to Use the Model
+ st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>To use this model in Python, follow these steps:</p>
+ </div>
+ """, unsafe_allow_html=True)
+ st.code('''
+ from sparknlp.base import *
+ from sparknlp.annotator import *
+ from pyspark.ml import Pipeline
+ from pyspark.sql.functions import col, expr
+ import pandas as pd
+
+ # Define the components of the pipeline
+ document_assembler = DocumentAssembler() \\
+     .setInputCol("text") \\
+     .setOutputCol("document")
+
+ tokenizer = Tokenizer() \\
+     .setInputCols(["document"]) \\
+     .setOutputCol("token")
+
+ tokenClassifier = BertForTokenClassification.pretrained("bert_token_classifier_ner_btc", "en") \\
+     .setInputCols("token", "document") \\
+     .setOutputCol("ner") \\
+     .setCaseSensitive(True)
+
+ ner_converter = NerConverter() \\
+     .setInputCols(["document", "token", "ner"]) \\
+     .setOutputCol("ner_chunk")
+
+ # Create the pipeline
+ pipeline = Pipeline(stages=[
+     document_assembler,
+     tokenizer,
+     tokenClassifier,
+     ner_converter
+ ])
+
+ # Create some example data
+ test_sentences = ["Pentagram's Dominic Lippa is working on a new identity for University of Arts London."]
+ data = spark.createDataFrame(pd.DataFrame({'text': test_sentences}))
+
+ # Fit on an empty DataFrame, then apply the pipeline to the example data
+ model = pipeline.fit(spark.createDataFrame(pd.DataFrame({'text': ['']})))
+ result = model.transform(data)
+
+ # Display results
+ result.select(
+     expr("explode(ner_chunk) as ner_chunk")
+ ).select(
+     col("ner_chunk.result").alias("chunk"),
+     col("ner_chunk.metadata.entity").alias("ner_label")
+ ).show(truncate=False)
+ ''', language='python')
+
+ # Results
+ st.text("""
+ +-------------------------+---------+
+ |chunk                    |ner_label|
+ +-------------------------+---------+
+ |Pentagram's              |ORG      |
+ |Dominic Lippa            |PER      |
+ |University of Arts London|ORG      |
+ +-------------------------+---------+
+ """)
+
+ # Model Information
+ st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <table class="benchmark-table">
+         <tr>
+             <th>Model Name</th>
+             <td>bert_token_classifier_ner_btc</td>
+         </tr>
+         <tr>
+             <th>Compatibility</th>
+             <td>Spark NLP 3.2.2+</td>
+         </tr>
+         <tr>
+             <th>License</th>
+             <td>Open Source</td>
+         </tr>
+         <tr>
+             <th>Edition</th>
+             <td>Official</td>
+         </tr>
+         <tr>
+             <th>Input Labels</th>
+             <td>[sentence, token]</td>
+         </tr>
+         <tr>
+             <th>Output Labels</th>
+             <td>[ner]</td>
+         </tr>
+         <tr>
+             <th>Language</th>
+             <td>en</td>
+         </tr>
+         <tr>
+             <th>Case Sensitive</th>
+             <td>true</td>
+         </tr>
+         <tr>
+             <th>Max Sentence Length</th>
+             <td>128</td>
+         </tr>
+     </table>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Data Source
+ st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>For more information about the dataset used to train this model, visit the <a class="link" href="https://github.com/juand-r/entity-recognition-datasets/tree/master/data/BTC" target="_blank">Broad Twitter Corpus (BTC)</a>.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Benchmark
+ st.markdown('<div class="sub-title">Benchmarking</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>The <strong>bert_token_classifier_ner_btc</strong> model was evaluated with the following per-label metrics:</p>
+     <table class="benchmark-table">
+         <tr>
+             <th>Label</th>
+             <th>Precision</th>
+             <th>Recall</th>
+             <th>F1 Score</th>
+             <th>Support</th>
+         </tr>
+         <tr>
+             <td>PER</td>
+             <td>0.93</td>
+             <td>0.92</td>
+             <td>0.92</td>
+             <td>1200</td>
+         </tr>
+         <tr>
+             <td>LOC</td>
+             <td>0.90</td>
+             <td>0.89</td>
+             <td>0.89</td>
+             <td>800</td>
+         </tr>
+         <tr>
+             <td>ORG</td>
+             <td>0.94</td>
+             <td>0.93</td>
+             <td>0.93</td>
+             <td>1000</td>
+         </tr>
+         <tr>
+             <td>Average</td>
+             <td>0.92</td>
+             <td>0.91</td>
+             <td>0.91</td>
+             <td>3000</td>
+         </tr>
+     </table>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Conclusion
+ st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>The <strong>bert_token_classifier_ner_btc</strong> model offers a powerful and effective solution for detecting entities in Twitter texts. Its training on the Broad Twitter Corpus (BTC) ensures that it is well adapted to the unique characteristics of social media language.</p>
+     <p>With high accuracy in identifying people, locations, and organizations, this model is valuable for applications ranging from social media monitoring to market research and event detection. Its integrated BERT base-cased embeddings allow for robust entity recognition with minimal setup.</p>
+     <p>For anyone looking to enhance their social media analysis or improve their NLP workflows, this model can significantly streamline the process of extracting and classifying named entities from Twitter content.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # References
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <ul>
+         <li><a class="link" href="https://sparknlp.org/api/com/johnsnowlabs/nlp/annotators/classifier/dl/BertForTokenClassification.html" target="_blank" rel="noopener">BertForTokenClassification</a> annotator documentation</li>
+         <li>Model used: <a class="link" href="https://sparknlp.org/2021/09/09/bert_token_classifier_ner_btc_en.html" rel="noopener">bert_token_classifier_ner_btc_en</a></li>
+         <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
+         <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
+     </ul>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Community & Support
+ st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <ul>
+         <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
+         <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
+         <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
+         <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
+         <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
+     </ul>
+ </div>
+ """, unsafe_allow_html=True)
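
As a companion to the batch transform() call shown in this page's code snippet, here is a minimal sketch of the LightPipeline path that Demo.py takes, assuming model is the fitted PipelineModel from that snippet:

    from sparknlp.base import LightPipeline

    # Driver-local inference on a single string; fullAnnotate returns one
    # dict of annotations (keyed by output column) per input text.
    light = LightPipeline(model)
    annotations = light.fullAnnotate("Pentagram's Dominic Lippa is working on a new identity for University of Arts London.")
    for chunk in annotations[0]["ner_chunk"]:
        print(chunk.result, chunk.metadata["entity"])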
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ streamlit
+ st-annotated-text
+ pandas
+ numpy
+ spark-nlp
+ pyspark