Upload app.py
app.py
ADDED
@@ -0,0 +1,96 @@
from transformers import AutoTokenizer, TFAutoModel
import tensorflow as tf
#from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
import numpy as np
from keras.models import load_model
import streamlit as st
import io
import PyPDF2
import re
from PIL import Image

image = Image.open('header-image.png')
st.image(image)

def preprocess(text):
    # Regular expression matching URLs, user names, and any character that is not
    # alphanumeric, an apostrophe, or whitespace
    pattern = re.compile(r'https?://\S+|[^0-9A-Za-z\' \t]|@\w+')
    # Find every match in the text
    matches = pattern.findall(text)
    # Replace each match with a space
    for match in matches:
        text = text.replace(match, ' ')
    return text


def predict(new_data):
    # Load the fitted label encoder and the trained classification model
    with open("labelencoder1.pkl", 'rb') as f:
        le = pickle.load(f)
    model = tf.keras.models.load_model("biobert-rnn1.h5")
    # Load BioBERT for tokenization and contextual embeddings
    tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-base-cased-v1.1")
    biobert_model = TFAutoModel.from_pretrained("dmis-lab/biobert-base-cased-v1.1", from_pt=True)
    # Tokenize each whitespace-separated word, pad to a fixed length, and embed it
    X_tokens = [tokenizer.encode(text, add_special_tokens=True) for text in new_data.split()]
    X_padded = pad_sequences(X_tokens, maxlen=22, dtype='long', truncating='post', padding='post')
    X_tensor = tf.convert_to_tensor(X_padded)
    X_embeddings = biobert_model(X_tensor)[0]
    # Predict a label for every word and map class indices back to label names
    pred = model.predict(X_embeddings)
    predicted_labels = list(le.inverse_transform(np.argmax(pred, axis=1)))
    # Merge consecutive words that share the same entity label into a single span
    text = new_data.split()
    prev_label = " "
    data = []
    labels = []
    for word, label in zip(text, predicted_labels):
        if label != "Other":
            label = label.split('-')[1]
        if prev_label == label:
            data[-1] = data[-1] + " " + word
        else:
            data.append(word)
            labels.append(label)
        prev_label = label
    return data, labels

def highlight(sentence):
    # Render the sentence with predicted entity spans highlighted and labelled
    highlighted_text = ""
    entity_colors = {"Symptom": "#87cefa", "Medical Condition": "#ffb6c1"}
    words, labels = predict(sentence)
    for word, label in zip(words, labels):
        if label != "Other" and word != "a":
            if label in ["Medical Condition", "Symptom"]:
                word_color = entity_colors.get(label, "yellow")
                # No per-label colour override is defined, so the label text falls back to black
                label_color = entity_colors.get(label + '-label', "black")
                highlighted_text += f'<mark style="background-color: {word_color}; color: {label_color}; padding: 0 0.25rem; border-radius: 0.25rem; border: 2px solid {word_color}; border-bottom-width: 1px">{word}<sup style="background-color: white; color: black; border: 1px solid black; border-radius: 2px; padding: 0 0.15rem; font-size: 70%; margin-left: 0.15rem; font-weight: bold;">{label}</sup></mark> '
            else:
                highlighted_text += f'{word} '
        else:
            highlighted_text += f'{word} '
    st.markdown(highlighted_text, unsafe_allow_html=True)

st.subheader('Named Entity Recognizer for Oral Medicine and Radiology')
sentence = st.text_area('Enter some text:')

st.write("OR")

selected_options = st.selectbox(
    'Choose a text from dropdown: ',
    (" ",
     'Anemia and gingival bleeding are connected in that anemia can be a contributing cause to the occurrence of gingival bleeding . Anemia is a condition characterized by a shortage in the number or quality of red blood cells, which can lead to a reduced ability of the blood to carry oxygen throughout the body.',
     'Hemophilia is a genetic illness that mainly affects the blood ability to clot properly. Individuals with significant hemophilia are at an elevated possibility of experiencing unforeseen bleeding episodes, which can occur in various parts of the body, including the mouth. Gingival bleeding can be a sign of hemophilia and can present as gum bleeding or mouth sores.',
     "Von Willebrand disease VWD is a genetic condition that impairs the blood's ability to clot properly. One of the symptoms of VWD is spontaneous gingival bleeding , which can occur without any apparent cause or trauma"))  # the blank first entry is the default selection


# Entity label colors are defined inside highlight()

if st.button('Analyze'):
    if sentence:
        highlight(sentence)
    elif selected_options.strip():  # ignore the blank placeholder option
        highlight(selected_options)
    else:
        st.write('Please enter a sentence or select an option from the dropdown or upload a file.')
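
For reference, a minimal sketch of exercising the helpers above outside the Streamlit UI. Note that preprocess() is defined in the upload but never called by the UI code, so this sketch applies it explicitly before predict(); the sample sentence is an arbitrary illustration, and labelencoder1.pkl and biobert-rnn1.h5 are assumed to sit next to app.py, as the code expects.

# Illustrative only: reuses preprocess() and predict() from app.py above.
sample = preprocess("Patient reports spontaneous gingival bleeding and persistent jaw pain .")
spans, span_labels = predict(sample)
for span, span_label in zip(spans, span_labels):
    print(f"{span}: {span_label}")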