aemin committed on
Commit
5b97320
·
1 Parent(s): 3a464f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +136 -27
app.py CHANGED
@@ -61,18 +61,43 @@ st.sidebar.markdown(logo_html, unsafe_allow_html=True)
61
 
62
 
63
  #sidebar info
64
- model_name= ["nerdl_fewnerd_100d"]
65
  st.sidebar.title("Pretrained model to test")
66
  selected_model = st.sidebar.selectbox("", model_name)
67
 
68
  ######## Main Page #########
69
- app_title= "Detect up to 8 entity types in general domain texts"
70
- app_description= "Named Entity Recognition model aimed to detect up to 8 entity types from general domain texts. This model was trained on the Few-NERD/inter public dataset using Spark NLP, and is available in Spark NLP Models hub (https://nlp.johnsnowlabs.com/models)"
71
- st.title(app_title)
72
- st.markdown("<h2>"+app_description+"</h2>" , unsafe_allow_html=True)
73
  if selected_model == "nerdl_fewnerd_100d":
 
 
 
 
74
  st.markdown("**`PERSON`** **,** **`ORGANIZATION`** **,** **`LOCATION`** **,** **`ART`** **,** **`BUILDING`** **,** **`PRODUCT`** **,** **`EVENT`** **,** **`OTHER`**", unsafe_allow_html=True)
75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  st.subheader("")
77
 
78
 
@@ -91,31 +116,103 @@ def get_pipeline(text):
91
  .setInputCols(["sentence"])\
92
  .setOutputCol("token")
93
 
94
- embeddings= WordEmbeddingsModel.pretrained("glove_100d")\
95
- .setInputCols(["sentence", "token"])\
96
- .setOutputCol("embeddings")
97
-
98
-
99
- ner= NerDLModel.pretrained("nerdl_fewnerd_100d")\
100
- .setInputCols(["document", "token", "embeddings"])\
101
- .setOutputCol("ner")
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
 
104
- ner_converter= NerConverter()\
105
- .setInputCols(["sentence", "token", "ner"])\
106
- .setOutputCol("ner_chunk")
107
 
108
 
109
- pipeline = Pipeline(
110
- stages = [
111
- documentAssembler,
112
- sentenceDetector,
113
- tokenizer,
114
- embeddings,
115
- ner,
116
- ner_converter
117
- ])
118
-
119
  empty_df = spark.createDataFrame([[""]]).toDF("text")
120
  pipeline_model = pipeline.fit(empty_df)
121
 
@@ -126,7 +223,13 @@ def get_pipeline(text):
126
 
127
 
128
 
129
- text= st.text_input("Type here your text and press enter to run:", value="12 Corazones ('12 Hearts') is Spanish-language dating game show produced in the United States for the television network Telemundo since January 2005, based on its namesake Argentine TV show format. The show is filmed in Los Angeles and revolves around the twelve Zodiac signs that identify each contestant. In 2008, Ho filmed a cameo in the Steven Spielberg feature film The Cloverfield Paradox, as a news pundit.")
 
 
 
 
 
 
130
 
131
  #placeholder for warning
132
  placeholder= st.empty()
@@ -151,3 +254,9 @@ labels = st.sidebar.multiselect(
151
 
152
  show_html2(text, df, labels, "Text annotated with identified Named Entities")
153
 
 
 
 
 
 
 
 
61
 
62
 
63
  #sidebar info
64
+ model_name= ["nerdl_fewnerd_100d", "bert_large_token_classifier_ontonote", "ner_mit_movie_complex_distilbert_base_cased", "ner_conll_albert_large_uncased"]
65
  st.sidebar.title("Pretrained model to test")
66
  selected_model = st.sidebar.selectbox("", model_name)
67
 
68
  ######## Main Page #########
 
 
 
 
69
  if selected_model == "nerdl_fewnerd_100d":
70
+ app_title= "Detect up to 8 entity types in general domain texts"
71
+ app_description= "Named Entity Recognition model aimed to detect up to 8 entity types from general domain texts. This model was trained on the Few-NERD/inter public dataset using Spark NLP, and it is available in Spark NLP Models hub (https://nlp.johnsnowlabs.com/models)"
72
+ st.title(app_title)
73
+ st.markdown("<h2>"+app_description+"</h2>" , unsafe_allow_html=True)
74
  st.markdown("**`PERSON`** **,** **`ORGANIZATION`** **,** **`LOCATION`** **,** **`ART`** **,** **`BUILDING`** **,** **`PRODUCT`** **,** **`EVENT`** **,** **`OTHER`**", unsafe_allow_html=True)
75
 
76
+ elif selected_model== "bert_large_token_classifier_ontonote":
77
+ app_title= "Detect up to 18 entity types in general domain texts"
78
+ app_description= "Named Entity Recognition model aimed to detect up to 18 entity types from general domain texts. This model is a fine-tuned BERT model that is ready to use for Named Entity Recognition and achieves state-of-the-art performance for the NER task, and it is available in Spark NLP Models hub (https://nlp.johnsnowlabs.com/models)"
79
+ st.title(app_title)
80
+ st.markdown("<h2>"+app_description+"</h2>" , unsafe_allow_html=True)
81
+ st.markdown("""**`CARDINAL`** **,** **`DATE`** **,** **`EVENT`** **,** **`FAC`** **,** **`GPE`** **,** **`LANGUAGE`** **,** **`LAW`** **,** **`LOC`**,
82
+ **`MONEY`** **,** **`NORP`** **,** **`ORDINAL`** **,** **`ORG`** **,** **`PERCENT`** **,** **`PERCENT`** **,** **`PERSON`** **,** **`PRODUCT`**,
83
+ **`QUANTITY`** **,** **`TIME`** **,** **`WORK_OF_ART` **""", unsafe_allow_html=True)
84
+
85
+
86
+ elif selected_model== "ner_mit_movie_complex_distilbert_base_cased":
87
+ app_title= "Detect up to 12 entity types in movie domain texts"
88
+ app_description= "Named Entity Recognition model aimed to detect up to 12 entity types from movie domain texts. This model was trained on the MIT Movie Corpus complex queries dataset to detect movie trivia using Spark NLP, and it is available in Spark NLP Models hub (https://nlp.johnsnowlabs.com/models)"
89
+ st.title(app_title)
90
+ st.markdown("<h2>"+app_description+"</h2>" , unsafe_allow_html=True)
91
+ st.markdown("""**`ACTOR`** **,** **`AWARD`** **,** **`CHARACTER_NAME`** **,** **`DIRECTOR`** **,** **`GENRE`** **,** **`OPINION`** **,** **`ORIGIN`** **,** **`PLOT`**,
92
+ **`QUOTE`** **,** **`RELATIONSHIP`** **,** **`SOUNDTRACK`** **,** **`YEAR` **""", unsafe_allow_html=True)
93
+
94
+ elif selected_model=="ner_conll_albert_large_uncased":
95
+ app_title= "Detect up to 4 entity types in general domain texts"
96
+ app_description= "Named Entity Recognition model aimed to detect up to 4 entity types from general domain texts. This model was trained on the CoNLL 2003 text corpus using Spark NLP, and it is available in Spark NLP Models hub (https://nlp.johnsnowlabs.com/models)"
97
+ st.title(app_title)
98
+ st.markdown("<h2>"+app_description+"</h2>" , unsafe_allow_html=True)
99
+ st.markdown("**`PER`** **,** **`LOC`** **,** **`ORG`** **,** **`MISC` **", unsafe_allow_html=True)
100
+
101
  st.subheader("")
102
 
103
 
 
116
  .setInputCols(["sentence"])\
117
  .setOutputCol("token")
118
 
 
 
 
 
 
 
 
 
119
 
120
+ if selected_model=="nerdl_fewnerd_100d":
121
+ embeddings= WordEmbeddingsModel.pretrained("glove_100d")\
122
+ .setInputCols(["sentence", "token"])\
123
+ .setOutputCol("embeddings")
124
+
125
+ ner= NerDLModel.pretrained(selected_model)\
126
+ .setInputCols(["document", "token", "embeddings"])\
127
+ .setOutputCol("ner")
128
+
129
+
130
+ ner_converter= NerConverter()\
131
+ .setInputCols(["sentence", "token", "ner"])\
132
+ .setOutputCol("ner_chunk")
133
+
134
+
135
+ pipeline = Pipeline(
136
+ stages = [
137
+ documentAssembler,
138
+ sentenceDetector,
139
+ tokenizer,
140
+ embeddings,
141
+ ner,
142
+ ner_converter
143
+ ])
144
+
145
+ elif selected_model=="bert_large_token_classifier_ontonote":
146
+ tokenClassifier = BertForTokenClassification \
147
+ .pretrained('bert_large_token_classifier_ontonote', 'en') \
148
+ .setInputCols(['token', 'document']) \
149
+ .setOutputCol('ner') \
150
+ .setCaseSensitive(True) \
151
+ .setMaxSentenceLength(512)
152
+
153
+ ner_converter= NerConverter()\
154
+ .setInputCols(["document", "token", "ner"])\
155
+ .setOutputCol("ner_chunk")
156
+
157
+ pipeline = Pipeline(
158
+ stages = [
159
+ documentAssembler,
160
+ sentenceDetector,
161
+ tokenizer,
162
+ tokenClassifier,
163
+ ner_converter
164
+ ])
165
+
166
+ elif selected_model=="ner_mit_movie_complex_distilbert_base_cased":
167
+ embeddings = DistilBertEmbeddings\
168
+ .pretrained('distilbert_base_cased', 'en')\
169
+ .setInputCols(["token", "document"])\
170
+ .setOutputCol("embeddings")
171
+
172
+ ner = NerDLModel.pretrained('ner_mit_movie_complex_distilbert_base_cased', 'en') \
173
+ .setInputCols(['document', 'token', 'embeddings']) \
174
+ .setOutputCol('ner')
175
+
176
+ ner_converter= NerConverter()\
177
+ .setInputCols(["document", "token", "ner"])\
178
+ .setOutputCol("ner_chunk")
179
+
180
+ pipeline = Pipeline(
181
+ stages = [
182
+ documentAssembler,
183
+ sentenceDetector,
184
+ tokenizer,
185
+ embeddings,
186
+ ner,
187
+ ner_converter
188
+ ])
189
+
190
+ elif selected_model=="ner_conll_albert_large_uncased":
191
+ embeddings = AlbertEmbeddings\
192
+ .pretrained('albert_large_uncased', 'en')\
193
+ .setInputCols(["document", "token"])\
194
+ .setOutputCol("embeddings")
195
+
196
+ ner = NerDLModel.pretrained('ner_conll_albert_large_uncased', 'en') \
197
+ .setInputCols(['document', 'token', 'embeddings']) \
198
+ .setOutputCol('ner')
199
+
200
+ ner_converter = NerConverter()\
201
+ .setInputCols(["document","token","ner"])\
202
+ .setOutputCol("ner_chunk")
203
+
204
+ pipeline = Pipeline(
205
+ stages = [
206
+ documentAssembler,
207
+ sentenceDetector,
208
+ tokenizer,
209
+ embeddings,
210
+ ner,
211
+ ner_converter
212
+ ])
213
 
 
 
 
214
 
215
 
 
 
 
 
 
 
 
 
 
 
216
  empty_df = spark.createDataFrame([[""]]).toDF("text")
217
  pipeline_model = pipeline.fit(empty_df)
218
 
 
223
 
224
 
225
 
226
+ if selected_model=="ner_mit_movie_complex_distilbert_base_cased":
227
+ text= st.text_input("Type here your text and press enter to run:", value="It's only appropriate that Solaris, Russian filmmaker Andrei Tarkovsky's psychological sci-fi classic from 1972, contains an equally original and mind-bending score. Solaris explores the inadequacies of time and memory on an enigmatic planet below a derelict space station. To reinforce the film's chilling setting, Tarkovsky commissioned composer Eduard Artemiev to construct an electronic soundscape reflecting planet Solaris' amorphous and mysterious surface")
228
+
229
+
230
+ else:
231
+ text= st.text_input("Type here your text and press enter to run:", value="12 Corazones ('12 Hearts') is Spanish-language dating game show produced in the United States for the television network Telemundo since January 2005, based on its namesake Argentine TV show format. The show is filmed in Los Angeles and revolves around the twelve Zodiac signs that identify each contestant. In 2008, Ho filmed a cameo in the Steven Spielberg feature film The Cloverfield Paradox, as a news pundit.")
232
+
233
 
234
  #placeholder for warning
235
  placeholder= st.empty()
 
254
 
255
  show_html2(text, df, labels, "Text annotated with identified Named Entities")
256
 
257
+
258
+ try_link="""<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/3.SparkNLP_Pretrained_Models.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/></a>"""
259
+ st.sidebar.title('')
260
+ st.sidebar.markdown('Try it yourself:')
261
+ st.sidebar.markdown(try_link, unsafe_allow_html=True)
262
+