Sibinraj committed on
Commit
1e85b60
·
1 Parent(s): d42fc4d

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +176 -0
app.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import numpy as np
3
+ import streamlit as st
4
+ import cv2
5
+ import tensorflow as tf
6
+ from tqdm import tqdm
7
+ from PIL import Image
8
+ import os
9
+ from tensorflow.keras.preprocessing import sequence
10
+ from tensorflow.keras.saving import load_model
11
+
12
# --- Top-level UI: page title and task selector ----------------------------
st.title("DL-Classifier")

# `task` drives which classifier section below is rendered.
task = st.selectbox(
    'Select One',
    ("Choose any", "Sentiment Classification", 'Tumor Detection'),
)
17
# ---------------------------------------------------------------------------
# Tumor Detection (CNN)
# ---------------------------------------------------------------------------
if task == "Tumor Detection":
    st.subheader("Tumor Detection")

    # Model file is expected next to the app in the current working directory.
    model_path = os.path.join(os.getcwd(), 'cnn_model.h5')
    cnn_model = load_model(model_path)

    img = st.file_uploader("choose the image", type=('jpg', 'jpeg', 'png'))

    def cnn_make_prediction(img, cnn_model):
        """Classify an uploaded image as "Tumor" / "No Tumor".

        Parameters
        ----------
        img : file-like or path
            Any image source accepted by ``PIL.Image.open``.
        cnn_model : keras.Model
            Binary classifier assumed to emit a single sigmoid
            probability per image -- TODO confirm against training code.

        Returns
        -------
        str
            "Tumor" when the predicted probability exceeds 0.5.
        """
        image = Image.open(img)
        # Force 3 channels so an RGBA PNG or grayscale JPEG does not
        # produce the wrong input shape (assumes the model was trained
        # on (128, 128, 3) RGB input -- confirm against training code).
        image = image.convert('RGB')
        image = image.resize((128, 128))
        input_img = np.expand_dims(np.array(image), axis=0)
        res = cnn_model.predict(input_img)
        # BUG FIX: the original used `if res:`, which is truthy for ANY
        # non-zero probability, so nearly every image was labeled "Tumor".
        # Threshold the sigmoid output at 0.5 instead.
        if res[0][0] > 0.5:
            return "Tumor"
        else:
            return "No Tumor"

    if img is not None:
        st.image(img, caption="Uploaded Image.", use_column_width=False, width=200)
        st.write("")

        if st.button("Detect Tumor"):
            result = cnn_make_prediction(img, cnn_model)
            st.subheader("Tumor Detection Result")
            st.write(f"**{result}**")
53
+
54
+
55
# ---------------------------------------------------------------------------
# Sentiment / spam classification
# ---------------------------------------------------------------------------
if task == "Sentiment Classification":
    st.subheader("Sentiment Classification")
    # `clss_model` selects which of the model branches below is rendered.
    clss_model = st.radio(
        "Select Classification Model:",
        ("RNN", "DNN", "Backpropagation", 'Perceptron', 'LSTM'),
    )
    # NOTE: a dead `select_model = None` assignment was removed here; the
    # name was never read anywhere in the file.
61
+
62
+ if clss_model=="RNN":
63
+
64
+ model_path = os.path.join(os.getcwd(), 'rnn_model.h5')
65
+ rnn_model = load_model(model_path)
66
+ with open("rnn_tokeniser.pkl",'rb') as tokeniser_file:
67
+ rnn_tokeniser=pickle.load(tokeniser_file)
68
+
69
+ st.subheader('RNN Spam Classification')
70
+
71
+ input=st.text_area("Enter your message here:")
72
+ def rnn_pred(input):
73
+ max_length=10
74
+ encoded_test = rnn_tokeniser.texts_to_sequences(input)
75
+ padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=max_length, padding='post')
76
+ predict= (rnn_model.predict(padded_test) > 0.5).astype("int32")
77
+ if predict:
78
+ return "Spam "
79
+ else:
80
+ return "Not Spam"
81
+ if st.button('Check'):
82
+ pred=rnn_pred([input])
83
+ st.write(pred)
84
+
85
+ if clss_model=='Perceptron':
86
+ with open("perceptron_model_saved.pkl",'rb') as model_file:
87
+ percep_model=pickle.load(model_file)
88
+ with open('perceptron_tokeniser_saved.pkl','rb') as model_file:
89
+ percep_token=pickle.load(model_file)
90
+ st.subheader('Perceptron Spam Classification')
91
+ input= st.text_area("Enter your text here")
92
+
93
+ def percep_pred(input):
94
+ encoded_test_p = percep_token.texts_to_sequences([input])
95
+ padded_test_p = tf.keras.preprocessing.sequence.pad_sequences(encoded_test_p, maxlen=10)
96
+ predict_p= percep_model.predict(padded_test_p)
97
+ if predict_p:
98
+ return "Spam"
99
+ else:
100
+ return "Not Spam"
101
+ if st.button("Check"):
102
+ pred=percep_pred([input])
103
+ st.write(pred)
104
+
105
+
106
+ if clss_model=="Backpropagation":
107
+ with open('bp_model.pkl','rb') as model_file:
108
+ bp_model=pickle.load(model_file)
109
+ with open('backrpop_tokeniser.pkl','rb') as model_file:
110
+ bp_tokeniser=pickle.load(model_file)
111
+ st.subheader('Backpropagation Spam Classification')
112
+ input= st.text_area("Enter your text here")
113
+
114
+
115
+ def back_pred(input):
116
+ encoded_test = bp_tokeniser.texts_to_sequences([input])
117
+ padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=10)
118
+ predict= bp_model.predict(padded_test)
119
+ if predict:
120
+ return "Spam"
121
+ else:
122
+ return "Not Spam"
123
+ if st.button("Check"):
124
+ pred=back_pred([input])
125
+ st.write(pred)
126
+
127
+ if clss_model=="DNN":
128
+
129
+ model_path = os.path.join(os.getcwd(), 'dnn_model.h5')
130
+ dnn_model = load_model(model_path)
131
+
132
+ with open("dnn_tokeniser.pkl",'rb') as file:
133
+ dnn_tokeniser=pickle.load(file)
134
+ st.subheader('DNN Spam Classification')
135
+ input= st.text_area("Enter your text here")
136
+
137
+ def dnn_pred(input):
138
+ encoded_test = dnn_tokeniser.texts_to_sequences([input])
139
+ padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=500)
140
+ predict= dnn_model.predict(padded_test)
141
+ if predict:
142
+ return "Spam"
143
+ else:
144
+ return "Not Spam"
145
+ if st.button('Check'):
146
+ pred=dnn_pred([input])
147
+ st.write(pred)
148
+
149
+
150
+ if clss_model=="LSTM":
151
+ model_path = os.path.join(os.getcwd(), 'lstm_model.h5')
152
+ lstm_model = load_model(model_path)
153
+
154
+ with open("lstm_tokeniser.pkl",'rb') as file:
155
+ lstm_tokeniser=pickle.load(file)
156
+ st.subheader('Movie Review Classification')
157
+ inp=st.text_area("Enter your review")
158
+ def lstm_make_predictions(inp, model):
159
+ inp = lstm_tokeniser.texts_to_sequences(inp)
160
+ inp = sequence.pad_sequences(inp, maxlen=500)
161
+ res = (model.predict(inp) > 0.5).astype("int32")
162
+ if res:
163
+ return "Negative"
164
+ else:
165
+ return "Positive"
166
+
167
+
168
+ if st.button('Check'):
169
+ pred = lstm_make_predictions([inp], lstm_model)
170
+ st.write(pred)
171
+
172
+
173
+
174
+
175
+
176
+