abdullahmubeen10 committed commit 12f20ca (verified) · Parent(s): d9e5152

Upload 10 files

.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
+[theme]
+base="light"
+primaryColor="#29B4E8"
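(This pins the app to Streamlit's light theme, with #29B4E8 as the primary accent color used for interactive widgets.)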
Demo.py ADDED
@@ -0,0 +1,156 @@
+import streamlit as st
+import sparknlp
+import os
+import pandas as pd
+
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from sparknlp.pretrained import PretrainedPipeline
+from annotated_text import annotated_text
+
+# Page configuration
+st.set_page_config(
+    layout="wide",
+    initial_sidebar_state="auto"
+)
+
+# CSS for styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 10px;
+            border-radius: 10px;
+            margin-top: 10px;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+@st.cache_resource
+def init_spark():
+    return sparknlp.start()
+
+@st.cache_resource
+def create_pipeline(model):
+    document_assembler = DocumentAssembler() \
+        .setInputCol("text") \
+        .setOutputCol("document")
+
+    sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "en") \
+        .setInputCols(["document"]) \
+        .setOutputCol("sentence")
+
+    tokenizer = Tokenizer() \
+        .setInputCols(["sentence"]) \
+        .setOutputCol("token")
+
+    token_classifier = RoBertaForTokenClassification.pretrained("roberta_token_classifier_timex_semeval", "en") \
+        .setInputCols(["sentence", "token"]) \
+        .setOutputCol("ner")
+
+    ner_converter = NerConverter() \
+        .setInputCols(["sentence", "token", "ner"]) \
+        .setOutputCol("ner_chunk")
+
+    pipeline = Pipeline(stages=[
+        document_assembler,
+        sentence_detector,
+        tokenizer,
+        token_classifier,
+        ner_converter
+    ])
+    return pipeline
+
+def fit_data(pipeline, data):
+    empty_df = spark.createDataFrame([['']]).toDF('text')
+    pipeline_model = pipeline.fit(empty_df)
+    model = LightPipeline(pipeline_model)
+    result = model.fullAnnotate(data)
+    return result
+
+def annotate(data):
+    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+    annotated_words = []
+    for chunk, label in zip(chunks, labels):
+        parts = document.split(chunk, 1)
+        if parts[0]:
+            annotated_words.append(parts[0])
+        annotated_words.append((chunk, label))
+        document = parts[1]
+    if document:
+        annotated_words.append(document)
+    annotated_text(*annotated_words)
+
+# Set up the page layout
+st.markdown('<div class="main-title">Detect Time-related Terminology</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>Identify and classify time-related entities in text to produce a structured representation of temporal information. This model detects time expressions such as dates, times, and intervals, enabling automated systems to process and respond to time-sensitive queries accurately and efficiently.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Sidebar content
+model = st.sidebar.selectbox(
+    "Choose the pretrained model",
+    ["roberta_token_classifier_timex_semeval"],
+    help="For more info about the models visit: https://sparknlp.org/models"
+)
+
+# Reference notebook link in sidebar
+link = """
+<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER.ipynb">
+    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+</a>
+"""
+st.sidebar.markdown('Reference notebook:')
+st.sidebar.markdown(link, unsafe_allow_html=True)
+
+# Load examples (each input file: line 1 = truncated preview, line 2 = full text)
+folder_path = f"inputs/{model}"
+examples = [
+    lines[1].strip()
+    for filename in os.listdir(folder_path)
+    if filename.endswith('.txt')
+    for lines in [open(os.path.join(folder_path, filename), 'r', encoding='utf-8').readlines()]
+    if len(lines) >= 2
+]
+
+selected_text = st.selectbox("Select an example", examples)
+custom_input = st.text_input("Try it with your own sentence!")
+
+text_to_analyze = custom_input if custom_input else selected_text
+
+st.subheader('Full example text')
+HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+# Initialize Spark and create the pipeline
+spark = init_spark()
+pipeline = create_pipeline(model)
+output = fit_data(pipeline, text_to_analyze)
+
+# Display the annotated output
+st.subheader("Processed output:")
+
+results = {
+    'Document': output[0]['document'][0].result,
+    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
+    'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
+}
+
+annotate(results)
+
+with st.expander("View DataFrame"):
+    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+    df.index += 1
+    st.dataframe(df)
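A note on the `fit_data` helper above: Spark NLP's `LightPipeline` wraps a fitted `PipelineModel` so plain strings can be annotated in-process, without building a Spark DataFrame per request. Below is a minimal standalone sketch of that flow outside Streamlit; it assumes Spark NLP is installed and that the `create_pipeline` function from Demo.py is in scope.

import sparknlp
from sparknlp.base import LightPipeline

spark = sparknlp.start()

# The pretrained stages need no training data, so fit on an empty DataFrame
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = create_pipeline("roberta_token_classifier_timex_semeval").fit(empty_df)
light = LightPipeline(pipeline_model)

# fullAnnotate returns one dict per input string; each annotation exposes
# .result (the matched text) and .metadata (e.g. the predicted entity label)
output = light.fullAnnotate("It took 3 days from Tuesday to Friday.")
for chunk in output[0]['ner_chunk']:
    print(chunk.result, '->', chunk.metadata['entity'])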
Dockerfile ADDED
@@ -0,0 +1,70 @@
+# Base image: Ubuntu 18.04
+FROM ubuntu:18.04
+
+# Set environment variables
+ENV NB_USER jovyan
+ENV NB_UID 1000
+ENV HOME /home/${NB_USER}
+
+# Install required packages
+RUN apt-get update && apt-get install -y \
+    tar \
+    wget \
+    bash \
+    rsync \
+    gcc \
+    libfreetype6-dev \
+    libhdf5-serial-dev \
+    libpng-dev \
+    libzmq3-dev \
+    python3 \
+    python3-dev \
+    python3-pip \
+    unzip \
+    pkg-config \
+    software-properties-common \
+    graphviz \
+    openjdk-8-jdk \
+    ant \
+    ca-certificates-java \
+    && apt-get clean \
+    && update-ca-certificates -f
+
+# Install Python 3.8 and pip from the deadsnakes PPA
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
+    && apt-get install -y python3.8 python3-pip \
+    && apt-get clean
+
+# Set up JAVA_HOME (Spark requires Java 8)
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+RUN mkdir -p ${HOME} \
+    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
+    && chown -R ${NB_UID}:${NB_UID} ${HOME}
+
+# Create a new user named "jovyan" with user ID 1000
+RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+# Switch to the "jovyan" user
+USER ${NB_USER}
+
+# Set home and path variables for the user
+ENV HOME=/home/${NB_USER} \
+    PATH=/home/${NB_USER}/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR ${HOME}
+
+# Upgrade pip and install Python dependencies
+RUN python3.8 -m pip install --upgrade pip
+COPY requirements.txt /tmp/requirements.txt
+RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+# Copy the application code into the container at /home/jovyan
+COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+# Expose port for Streamlit
+EXPOSE 7860
+
+# Define the entry point for the container
+ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
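For local testing, the image can typically be built and run with `docker build -t timex-demo .` followed by `docker run -p 7860:7860 timex-demo` (the tag `timex-demo` is an arbitrary choice), after which the Streamlit app is reachable at http://localhost:7860.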
inputs/roberta_token_classifier_timex_semeval/Example1.txt ADDED
@@ -0,0 +1,2 @@
+Model training was started at 22:12C and it took 3 days from Tuesday ...
+Model training was started at 22:12C and it took 3 days from Tuesday to Friday.
inputs/roberta_token_classifier_timex_semeval/Example2.txt ADDED
@@ -0,0 +1,2 @@
+My college certificate program will be between January and June and classes will be from Monday t...
+My college certificate program will be between January and June and classes will be from Monday to Thursday
inputs/roberta_token_classifier_timex_semeval/Example3.txt ADDED
@@ -0,0 +1,2 @@
+Compared to the previous year in Canada, the skiing season star...
+Compared to the previous year in Canada, the skiing season started early.
inputs/roberta_token_classifier_timex_semeval/Example4.txt ADDED
@@ -0,0 +1,2 @@
+Spring walks, which have been made since 2011, started on the first Sunday of April this year, the w...
+Spring walks, which have been made since 2011, started on the first Sunday of April this year, the walking event, which attracted a lot of attention, started at 2 PM and lasted for 3 hours and 20 minutes.
inputs/roberta_token_classifier_timex_semeval/Example5.txt ADDED
@@ -0,0 +1,2 @@
+if a New Yorker plans to meet someone in Los Angeles at 9 AM, and makes a calendar entry at 9 AM (wh...
+if a New Yorker plans to meet someone in Los Angeles at 9 AM, and makes a calendar entry at 9 AM (which the computer assumes is New York time), the calendar entry will be at 6 AM if taking the computer's time zone.
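Each example file follows the two-line convention the loader in Demo.py expects: line 1 holds a truncated preview and line 2 holds the full input text, which is what `lines[1].strip()` reads.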
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,275 @@
+import streamlit as st
+
+# Custom CSS for better styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .sub-title {
+            font-size: 24px;
+            color: #4A90E2;
+            margin-top: 20px;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 15px;
+            border-radius: 10px;
+            margin-top: 20px;
+        }
+        .section h2 {
+            font-size: 22px;
+            color: #4A90E2;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+        .link {
+            color: #4A90E2;
+            text-decoration: none;
+        }
+        .benchmark-table {
+            width: 100%;
+            border-collapse: collapse;
+            margin-top: 20px;
+        }
+        .benchmark-table th, .benchmark-table td {
+            border: 1px solid #ddd;
+            padding: 8px;
+            text-align: left;
+        }
+        .benchmark-table th {
+            background-color: #4A90E2;
+            color: white;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+# Main Title
+st.markdown('<div class="main-title">Detect Time-related Terminology</div>', unsafe_allow_html=True)
+
+# Description
+st.markdown("""
+<div class="section">
+    <p><strong>Detecting time-related terminology</strong> is a core NLP task that involves identifying and classifying temporal entities in text. This app uses the <strong>roberta_token_classifier_timex_semeval</strong> model, imported from Hugging Face, which pairs RoBERTa embeddings with a RobertaForTokenClassification head to label time expressions for NER.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# What is NER
+st.markdown('<div class="sub-title">What is Named Entity Recognition (NER)?</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p><strong>Named Entity Recognition (NER)</strong> is a process in Natural Language Processing (NLP) that locates and classifies named entities into predefined categories such as dates, times, periods, and other temporal expressions. For example, in the sentence "Model training was started at 22:12C and it took 3 days from Tuesday to Friday," NER helps identify '22:12C' as a time period, '3 days' as a calendar interval, and 'Tuesday' and 'Friday' as days of the week.</p>
+    <p>NER models are trained to understand the context and semantics of entities within text, enabling automated systems to recognize and categorize these entities accurately. This capability is essential for building intelligent systems that can process and respond to user queries efficiently.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Predicted Entities
+st.markdown('<div class="sub-title">Predicted Entities</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><strong>Period:</strong> Specific times such as "22:12C".</li>
+        <li><strong>Year:</strong> Years like "2023".</li>
+        <li><strong>Calendar-Interval:</strong> Intervals such as "3 days".</li>
+        <li><strong>Month-Of-Year:</strong> Months like "January".</li>
+        <li><strong>Day-Of-Month:</strong> Specific days like "15th".</li>
+        <li><strong>Day-Of-Week:</strong> Days like "Tuesday".</li>
+        <li><strong>Hour-Of-Day:</strong> Hours such as "10 AM".</li>
+        <li><strong>Minute-Of-Hour:</strong> Minutes like "45".</li>
+        <li><strong>Number:</strong> Numerical values like "3".</li>
+        <li><strong>Second-Of-Minute:</strong> Seconds like "30".</li>
+        <li><strong>Time-Zone:</strong> Time zones such as "PST".</li>
+        <li><strong>Part-Of-Day:</strong> Parts of the day like "morning".</li>
+        <li><strong>Season-Of-Year:</strong> Seasons like "summer".</li>
+        <li><strong>AMPM-Of-Day:</strong> "AM" or "PM".</li>
+        <li><strong>Part-Of-Week:</strong> Parts of the week like "weekend".</li>
+        <li><strong>Week-Of-Year:</strong> Weeks like "week 42".</li>
+        <li><strong>Two-Digit-Year:</strong> Years represented in two digits like "'99".</li>
+        <li><strong>Sum:</strong> Total values of time periods, e.g., "3 days and 2 hours".</li>
+        <li><strong>Difference:</strong> Subtracted time periods, e.g., "5 days ago".</li>
+        <li><strong>Union:</strong> Combination of multiple time-related entities.</li>
+        <li><strong>Intersection:</strong> Overlapping time periods.</li>
+        <li><strong>Every-Nth:</strong> Repeated intervals, e.g., "every 3rd day".</li>
+        <li><strong>This:</strong> Referring to the current period, e.g., "this week".</li>
+        <li><strong>Last:</strong> Referring to the previous period, e.g., "last year".</li>
+        <li><strong>Next:</strong> Referring to the following period, e.g., "next month".</li>
+        <li><strong>Before:</strong> Time before a specific point, e.g., "before noon".</li>
+        <li><strong>After:</strong> Time after a specific point, e.g., "after 5 PM".</li>
+        <li><strong>Between:</strong> Time between two points, e.g., "between Monday and Friday".</li>
+        <li><strong>NthFromStart:</strong> Nth position from the start.</li>
+        <li><strong>NthFromEnd:</strong> Nth position from the end.</li>
+        <li><strong>Frequency:</strong> How often something occurs, e.g., "weekly".</li>
+        <li><strong>Modifier:</strong> Modifiers for time-related entities.</li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
+
+# How to Use the Model
+st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>To use this model, follow these steps in Python:</p>
+</div>
+""", unsafe_allow_html=True)
+st.code('''
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from pyspark.sql.functions import col, expr
+
+# Define the components of the pipeline
+document_assembler = DocumentAssembler() \\
+    .setInputCol("text") \\
+    .setOutputCol("document")
+
+sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "en") \\
+    .setInputCols(["document"]) \\
+    .setOutputCol("sentence")
+
+tokenizer = Tokenizer() \\
+    .setInputCols(["sentence"]) \\
+    .setOutputCol("token")
+
+token_classifier = RoBertaForTokenClassification.pretrained("roberta_token_classifier_timex_semeval", "en") \\
+    .setInputCols(["sentence", "token"]) \\
+    .setOutputCol("ner")
+
+ner_converter = NerConverter() \\
+    .setInputCols(["sentence", "token", "ner"]) \\
+    .setOutputCol("ner_chunk")
+
+# Create the pipeline
+pipeline = Pipeline(stages=[
+    document_assembler,
+    sentence_detector,
+    tokenizer,
+    token_classifier,
+    ner_converter
+])
+
+# Create some example data
+text = "Model training was started at 22:12C and it took 3 days from Tuesday to Friday."
+data = spark.createDataFrame([[text]]).toDF("text")
+
+# Apply the pipeline to the data
+model = pipeline.fit(data)
+result = model.transform(data)
+
+# Explode the chunks and select each chunk with its entity label
+result.select(
+    expr("explode(ner_chunk) as ner_chunk")
+).select(
+    col("ner_chunk.result").alias("chunk"),
+    col("ner_chunk.metadata.entity").alias("entity")
+).show(truncate=False)
+''', language='python')
+
+# Results
+
+st.text("""
++-------+-----------------+
+|chunk  |entity           |
++-------+-----------------+
+|took   |Frequency        |
+|3      |Number           |
+|days   |Calendar-Interval|
+|Tuesday|Day-Of-Week      |
+|to     |Between          |
+|Friday |Day-Of-Week      |
++-------+-----------------+
+""")
+
+# Model Information
+st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <table class="benchmark-table">
+        <tr>
+            <th>Model Name</th>
+            <td>roberta_token_classifier_timex_semeval</td>
+        </tr>
+        <tr>
+            <th>Compatibility</th>
+            <td>Spark NLP 3.3.4+</td>
+        </tr>
+        <tr>
+            <th>License</th>
+            <td>Open Source</td>
+        </tr>
+        <tr>
+            <th>Edition</th>
+            <td>Official</td>
+        </tr>
+        <tr>
+            <th>Input Labels</th>
+            <td>[sentence, token]</td>
+        </tr>
+        <tr>
+            <th>Output Labels</th>
+            <td>[ner]</td>
+        </tr>
+        <tr>
+            <th>Language</th>
+            <td>en</td>
+        </tr>
+        <tr>
+            <th>Size</th>
+            <td>439.5 MB</td>
+        </tr>
+        <tr>
+            <th>Case sensitive</th>
+            <td>true</td>
+        </tr>
+        <tr>
+            <th>Max sentence length</th>
+            <td>256</td>
+        </tr>
+    </table>
+</div>
+""", unsafe_allow_html=True)
+
+# Data Source
+st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>For more information about the dataset used to train this model, visit the <a class="link" href="https://huggingface.co/clulab/roberta-timex-semeval" target="_blank">Hugging Face page</a>.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Conclusion
+st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <p>Detecting time-related terminology is essential for a wide range of applications. This model, built on RoBERTa embeddings with a RobertaForTokenClassification head, provides robust capabilities for identifying and classifying temporal entities within text.</p>
+    <p>By integrating this model into your systems, you can enhance scheduling, event tracking, historical data analysis, and more. Its broad coverage of time-related entity types makes it a valuable tool for many applications.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# References
+st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://sparknlp.org/api/com/johnsnowlabs/nlp/annotators/classifier/dl/RoBertaForTokenClassification.html#!=(x$1:Any):Boolean" target="_blank" rel="noopener">RoBertaForTokenClassification</a> annotator documentation</li>
+        <li>Model used: <a class="link" href="https://sparknlp.org/2021/12/28/roberta_token_classifier_timex_semeval_en.html" rel="noopener">roberta_token_classifier_timex_semeval_en</a></li>
+        <li><a class="link" href="https://nlp.johnsnowlabs.com/recognize_entitie" target="_blank" rel="noopener">Visualization demos for NER in Spark NLP</a></li>
+        <li><a class="link" href="https://www.johnsnowlabs.com/named-entity-recognition-ner-with-bert-in-spark-nlp/">Named Entity Recognition (NER) with BERT in Spark NLP</a></li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
+
+# Community & Support
+st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
+        <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub Repository</a>: Report issues or contribute</li>
+        <li><a class="link" href="https://forum.johnsnowlabs.com/" target="_blank">Community Forum</a>: Ask questions, share ideas, and connect with other users</li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
requirements.txt ADDED
@@ -0,0 +1,6 @@
+streamlit
+st-annotated-text
+pandas
+numpy
+spark-nlp
+pyspark
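Note that these dependencies are unpinned, so installs pull the latest releases. Spark NLP releases are validated against specific PySpark versions, so if the app fails to start it may be worth pinning a mutually compatible spark-nlp / pyspark pair.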