Sibinraj committed on
Commit
a95d150
·
1 Parent(s): 966f549

Upload app.py

Files changed (1)
  1. app.py +175 -0
app.py ADDED
@@ -0,0 +1,175 @@
+ import pickle
+ import numpy as np
+ import streamlit as st
+ import tensorflow as tf
+ from PIL import Image
+ import os
+ from tensorflow.keras.preprocessing import sequence
+ from tensorflow.keras.models import load_model
+
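+ # Every branch below loads its weights/tokeniser from the current working
+ # directory (os.path.join(os.getcwd(), ...) and the open(...) calls), so the
+ # .h5 and .pkl artifacts referenced in this file must sit next to app.py.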
+ st.title("DL-Classifier")
+
+ task = st.selectbox('Select One', ("Choose any", "Sentiment Classification", 'Tumor Detection'))
+
+
+ # Tumor detection (CNN)
+ if task == "Tumor Detection":
+     st.subheader("Tumor Detection")
+     model_path = os.path.join(os.getcwd(), 'cnn_model.h5')
+     cnn_model = load_model(model_path)
+
+     img = st.file_uploader("Choose the image", type=('jpg', 'jpeg', 'png'))
+
+     def cnn_make_prediction(img, cnn_model):
+         img = Image.open(img)
+         img = img.resize((128, 128))
+         img = np.array(img)
+         input_img = np.expand_dims(img, axis=0)
+         res = cnn_model.predict(input_img)
+         # assumes a single sigmoid output: probability above 0.5 means a tumor was detected
+         if res[0][0] > 0.5:
+             return "Tumor"
+         else:
+             return "No Tumor"
+     # local-disk testing variant, left commented out:
+     # if img != None:
+     #     img_f = "D:/SEM 3/DL/DL-ALGORITHMS/CNN/tumor_detection/tumordata/"
+     #     sub_dir = os.listdir(img_f)
+     #     cel_path = os.path.join(sub_dir, img_f)
+     #     cel_img = os.listdir(cel_path)
+     #     img_p = cel_img + img.name
+     #     pred = cnn_make_prediction(img_p, cnn_model)
+     #     st.write(pred)
+
+     if img is not None:
+         st.image(img, caption="Uploaded Image.", use_column_width=False, width=200)
+         st.write("")
+
+         if st.button("Detect Tumor"):
+             result = cnn_make_prediction(img, cnn_model)
+             st.subheader("Tumor Detection Result")
+             st.write(f"**{result}**")
+
+
+ # Sentiment classification
+
+ if task == "Sentiment Classification":
+     st.subheader("Sentiment Classification")
+     clss_model = st.radio("Select Classification Model:", ("RNN", "DNN", "Backpropagation", 'Perceptron', 'LSTM'))
+
+     if clss_model == "RNN":
+         model_path = os.path.join(os.getcwd(), 'rnn_model.h5')
+         rnn_model = load_model(model_path)
+         with open("rnn_tokeniser.pkl", 'rb') as tokeniser_file:
+             rnn_tokeniser = pickle.load(tokeniser_file)
+
+         st.subheader('RNN Spam Classification')
+
+         user_input = st.text_area("Enter your message here:")
+
+         def rnn_pred(texts):
+             # texts is a list of raw strings; tokenise, pad to length 10, then threshold the sigmoid output
+             max_length = 10
+             encoded_test = rnn_tokeniser.texts_to_sequences(texts)
+             padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=max_length, padding='post')
+             predict = (rnn_model.predict(padded_test) > 0.5).astype("int32")
+             if predict:
+                 return "Spam"
+             else:
+                 return "Not Spam"
+
+         if st.button('Check'):
+             pred = rnn_pred([user_input])
+             st.write(pred)
+
+     if clss_model == 'Perceptron':
+         with open("perceptron_model_saved.pkl", 'rb') as model_file:
+             percep_model = pickle.load(model_file)
+         with open('perceptron_tokeniser_saved.pkl', 'rb') as model_file:
+             percep_token = pickle.load(model_file)
+
+         st.subheader('Perceptron Spam Classification')
+         user_input = st.text_area("Enter your text here")
+
+         def percep_pred(text):
+             # the function wraps the raw string in a list itself, so callers pass the string directly
+             encoded_test_p = percep_token.texts_to_sequences([text])
+             padded_test_p = tf.keras.preprocessing.sequence.pad_sequences(encoded_test_p, maxlen=10)
+             predict_p = percep_model.predict(padded_test_p)
+             if predict_p:
+                 return "Spam"
+             else:
+                 return "Not Spam"
+
+         if st.button("Check"):
+             pred = percep_pred(user_input)
+             st.write(pred)
+
+
+     if clss_model == "Backpropagation":
+         with open('bp_model.pkl', 'rb') as model_file:
+             bp_model = pickle.load(model_file)
+         with open('backrpop_tokeniser.pkl', 'rb') as model_file:
+             bp_tokeniser = pickle.load(model_file)
+
+         st.subheader('Movie Review Classification using Backpropagation')
+         inp = st.text_area('Enter message')
+
+         def bp_make_predictions(text, model):
+             # the function wraps the raw string in a list itself, so callers pass the string directly
+             encoded_inp = bp_tokeniser.texts_to_sequences([text])
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=500)
+             res = model.predict(padded_inp)
+             if res:
+                 return "Negative"
+             else:
+                 return "Positive"
+
+         if st.button('Check'):
+             pred = bp_make_predictions(inp, bp_model)
+             st.write(pred)
+
+     if clss_model == "DNN":
+         model_path = os.path.join(os.getcwd(), 'dnn_model.h5')
+         dnn_model = load_model(model_path)
+         with open("dnn_tokeniser.pkl", 'rb') as file:
+             dnn_tokeniser = pickle.load(file)
+
+         st.subheader('SMS Spam Classification using DNN')
+         inp = st.text_area('Enter message')
+
+         def dnn_make_predictions(texts, model):
+             # texts is a list of raw strings; tokenise, pad to length 10, then threshold the sigmoid output
+             encoded_inp = dnn_tokeniser.texts_to_sequences(texts)
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=10, padding='post')
+             res = (model.predict(padded_inp) > 0.5).astype("int32")
+             if res:
+                 return "Spam"
+             else:
+                 return "Not Spam"
+
+         if st.button('Check'):
+             pred = dnn_make_predictions([inp], dnn_model)
+             st.write(pred)
+
+     if clss_model == "LSTM":
+         model_path = os.path.join(os.getcwd(), 'lstm_model.h5')
+         lstm_model = load_model(model_path)
+
+         with open("lstm_tokeniser.pkl", 'rb') as file:
+             lstm_tokeniser = pickle.load(file)
+
+         st.subheader('Movie Review Classification')
+         inp = st.text_area("Enter your review")
+
+         def lstm_make_predictions(texts, model):
+             # texts is a list of raw strings; tokenise, pad to length 500, then threshold the sigmoid output
+             seqs = lstm_tokeniser.texts_to_sequences(texts)
+             padded = sequence.pad_sequences(seqs, maxlen=500)
+             res = (model.predict(padded) > 0.5).astype("int32")
+             if res:
+                 return "Negative"
+             else:
+                 return "Positive"
+
+         if st.button('Check'):
+             pred = lstm_make_predictions([inp], lstm_model)
+             st.write(pred)
+
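Because every branch of app.py loads its weights and tokeniser from the working directory, a quick pre-flight check before launching Streamlit can catch missing files early. A minimal sketch (not part of the commit; the filenames are the ones referenced in app.py above):

import os

REQUIRED_ARTIFACTS = [
    "cnn_model.h5", "rnn_model.h5", "dnn_model.h5", "lstm_model.h5",
    "rnn_tokeniser.pkl", "dnn_tokeniser.pkl", "lstm_tokeniser.pkl",
    "perceptron_model_saved.pkl", "perceptron_tokeniser_saved.pkl",
    "bp_model.pkl", "backrpop_tokeniser.pkl",
]

# Report any model/tokeniser files that app.py would fail to load.
missing = [name for name in REQUIRED_ARTIFACTS if not os.path.exists(name)]
if missing:
    print("Missing artifacts:", ", ".join(missing))
else:
    print("All artifacts found; launch with: streamlit run app.py")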