Commit a297ab2
Parent(s): ebe078d
Create app.py
app.py
ADDED
@@ -0,0 +1,134 @@
import streamlit as st
import torch
from transformers import pipeline
import spacy
from spacy import displacy
import plotly.express as px
import numpy as np

st.set_page_config(page_title="NIU NLP Prototype")

st.title("Natural Language Processing Prototype")
st.write("_This web application is intended for educational use; please do not upload any classified, proprietary, or sensitive information._")
st.subheader("__Which natural language processing task would you like to try?__")
st.write("- __Sentiment Analysis:__ Identifying whether a piece of text has a positive or negative sentiment.")
st.write("- __Named Entity Recognition:__ Identifying all geopolitical entities, organizations, people, locations, or dates in a body of text.")
st.write("- __Text Classification:__ Placing a piece of text into one or more categories.")
st.write("- __Text Summarization:__ Condensing larger bodies of text into smaller bodies of text.")

option = st.selectbox('Please select from the list', ('', 'Sentiment Analysis', 'Named Entity Recognition', 'Text Classification', 'Text Summarization'))

# Cached loaders so the models are downloaded and initialized only once per session.
@st.cache(allow_output_mutation=True, show_spinner=False)
def Loading_Model_1():
    sum2 = pipeline("summarization", framework="pt")
    return sum2

@st.cache(allow_output_mutation=True, show_spinner=False)
def Loading_Model_2():
    class1 = pipeline("zero-shot-classification", framework="pt")
    return class1

@st.cache(allow_output_mutation=True, show_spinner=False)
def Loading_Model_3():
    sentiment = pipeline("sentiment-analysis", framework="pt")
    return sentiment

@st.cache(allow_output_mutation=True, show_spinner=False)
def Loading_Model_4():
    nlp = spacy.load('en_core_web_sm')
    return nlp

# Return all entity texts in entDict that carry the given spaCy label.
@st.cache(allow_output_mutation=True)
def entRecognizer(entDict, typeEnt):
    entList = [ent for ent in entDict if entDict[ent] == typeEnt]
    return entList

# Horizontal bar chart of the top predicted categories and their probabilities (in percent).
def plot_result(top_topics, scores):
    top_topics = np.array(top_topics)
    scores = np.array(scores)
    scores *= 100
    fig = px.bar(x=scores, y=top_topics, orientation='h',
                 labels={'x': 'Probability', 'y': 'Category'},
                 text=scores,
                 range_x=(0, 115),
                 title='Top Predictions',
                 color=np.linspace(0, 1, len(scores)),
                 color_continuous_scale="Bluered")
    fig.update(layout_coloraxis_showscale=False)
    fig.update_traces(texttemplate='%{text:0.1f}%', textposition='outside')
    st.plotly_chart(fig)

with st.spinner(text="Please wait for the models to load. This should take approximately 60 seconds."):
    sum2 = Loading_Model_1()
    class1 = Loading_Model_2()
    sentiment = Loading_Model_3()
    nlp = Loading_Model_4()

if option == 'Text Classification':
    cat1 = st.text_input('Enter each possible category name (separated by a comma). Maximum 5 categories.')
    text = st.text_area('Enter Text Below:', height=200)
    #uploaded_file = st.file_uploader("Choose a file", type=['txt'])
    submit = st.button('Generate')
    if submit:
        st.subheader("Classification Results:")
        labels1 = cat1.strip().split(',')
        result = class1(text, candidate_labels=labels1)
        cat1name = result['labels'][0]
        cat1prob = result['scores'][0]
        st.write('Category: {} | Probability: {:.1f}%'.format(cat1name, (cat1prob * 100)))
        plot_result(result['labels'][::-1][-10:], result['scores'][::-1][-10:])

if option == 'Text Summarization':
    max_lengthy = st.slider('Maximum summary length (words)', min_value=30, max_value=150, value=60, step=10)
    num_beamer = st.slider('Speed vs quality of summary (1 is fastest)', min_value=1, max_value=8, value=4, step=1)
    text = st.text_area('Enter Text Below (maximum 800 words):', height=300)
    #uploaded_file = st.file_uploader("Choose a file", type=['txt'])
    submit = st.button('Generate')
    if submit:
        st.subheader("Summary:")
        with st.spinner(text="This may take a moment..."):
            summWords = sum2(text, max_length=max_lengthy, min_length=15, num_beams=num_beamer, do_sample=True, early_stopping=True, repetition_penalty=1.5, length_penalty=1.5)
            text2 = summWords[0]["summary_text"]  #re.sub(r'\s([?.!"](?:\s|$))', r'\1', )
            st.write(text2)

if option == 'Sentiment Analysis':
    text = st.text_area('Enter Text Below:', height=200)
    #uploaded_file = st.file_uploader("Choose a file", type=['txt'])
    submit = st.button('Generate')
    if submit:
        st.subheader("Sentiment:")
        result = sentiment(text)
        sent = result[0]['label']
        cert = result[0]['score']
        st.write('Text Sentiment: {} | Probability: {:.1f}%'.format(sent, (cert * 100)))

if option == 'Named Entity Recognition':
    text = st.text_area('Enter Text Below:', height=300)
    #uploaded_file = st.file_uploader("Choose a file", type=['txt'])
    submit = st.button('Generate')
    if submit:
        entities = []
        entityLabels = []
        doc = nlp(text)
        for ent in doc.ents:
            entities.append(ent.text)
            entityLabels.append(ent.label_)
        entDict = dict(zip(entities, entityLabels))
        entOrg = entRecognizer(entDict, "ORG")
        entPerson = entRecognizer(entDict, "PERSON")
        entDate = entRecognizer(entDict, "DATE")
        entGPE = entRecognizer(entDict, "GPE")
        entLoc = entRecognizer(entDict, "LOC")
        options = {"ents": ["ORG", "GPE", "PERSON", "LOC", "DATE"]}
        HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""

        st.subheader("List of Named Entities:")
        st.write("Geopolitical Entities (GPE): " + str(entGPE))
        st.write("People (PERSON): " + str(entPerson))
        st.write("Organizations (ORG): " + str(entOrg))
        st.write("Dates (DATE): " + str(entDate))
        st.write("Locations (LOC): " + str(entLoc))
        st.subheader("Original Text with Entities Highlighted")
        html = displacy.render(doc, style="ent", options=options)
        html = html.replace("\n", " ")
        st.write(HTML_WRAPPER.format(html), unsafe_allow_html=True)
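For quick verification outside of Streamlit, the same pipelines the app wraps can be exercised directly. The snippet below is a minimal sketch and is not part of this commit: the example sentences and the expected outputs in the comments are illustrative, and it assumes transformers, torch, and spacy are installed and that the en_core_web_sm model has been downloaded (e.g. via python -m spacy download en_core_web_sm).

import spacy
from transformers import pipeline

# Sentiment analysis: same default model and call pattern as Loading_Model_3.
sentiment = pipeline("sentiment-analysis", framework="pt")
print(sentiment("The prototype works well."))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}]

# Zero-shot classification against caller-supplied labels, as in the Text Classification branch.
classifier = pipeline("zero-shot-classification", framework="pt")
print(classifier("The senate passed the bill on Tuesday.",
                 candidate_labels=["politics", "sports", "science"]))
# Returns a dict whose 'labels' are sorted by descending 'scores'.

# Named entity recognition with the small English spaCy model, as in the Named Entity Recognition branch.
nlp = spacy.load("en_core_web_sm")
doc = nlp("Apple was founded by Steve Jobs in California in 1976.")
print([(ent.text, ent.label_) for ent in doc.ents])
# Typically something like [('Apple', 'ORG'), ('Steve Jobs', 'PERSON'), ('California', 'GPE'), ('1976', 'DATE')].

Locally, the app itself can be launched with streamlit run app.py once the same dependencies are installed.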