import os
import pickle

import numpy as np
import streamlit as st
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import sequence

st.title("DL-Classifier")

task1 = st.selectbox("Select One", ("Choose any", "Sentiment Classification", "Tumor Detection"))

# ----- Tumor Detection (CNN) -----
if task1 == "Tumor Detection":
    st.subheader("Tumor Detection")

    model_path = os.path.join(os.getcwd(), "cnn_model.h5")
    cnn_model = load_model(model_path)

    img = st.file_uploader("Choose the image", type=("jpg", "jpeg", "png"))

    def cnn_make_prediction(img_file, model):
        # Resize the uploaded image to the model's 128x128 input and add a batch dimension.
        image = Image.open(img_file).resize((128, 128))
        input_img = np.expand_dims(np.array(image), axis=0)
        # Threshold the sigmoid output at 0.5 (assumes a single-unit output layer).
        res = model.predict(input_img)
        return "Tumor" if res[0][0] > 0.5 else "No Tumor"

    if img is not None:
        # Predict directly on the uploaded file object rather than a hard-coded local path.
        pred = cnn_make_prediction(img, cnn_model)
        st.write(pred)

# ----- Sentiment / Spam Classification -----
if task1 == "Sentiment Classification":
    st.subheader("Sentiment Classification")
    clss_model = st.radio(
        "Select Classification Model:",
        ("RNN", "DNN", "Backpropagation", "Perceptron", "LSTM"),
    )

    if clss_model == "RNN":
        model_path = os.path.join(os.getcwd(), "rnn_model.h5")
        rnn_model = load_model(model_path)
        with open("rnn_tokeniser.pkl", "rb") as tokeniser_file:
            rnn_tokeniser = pickle.load(tokeniser_file)

        st.subheader("RNN Spam Classification")
        message = st.text_input("Enter your message here:")

        def rnn_pred(text):
            # Tokenise and pad the message to the sequence length used during training.
            max_length = 10
            encoded_test = rnn_tokeniser.texts_to_sequences([text])
            padded_test = sequence.pad_sequences(encoded_test, maxlen=max_length, padding="post")
            predict = (rnn_model.predict(padded_test) > 0.5).astype("int32")
            return "Spam" if predict[0][0] else "Not Spam"

        if st.button("Check"):
            st.write(rnn_pred(message))

    if clss_model == "Perceptron":
        with open("perceptron_model_saved.pkl", "rb") as model_file:
            percep_model = pickle.load(model_file)
        with open("perceptron_tokeniser_saved.pkl", "rb") as tokeniser_file:
            percep_token = pickle.load(tokeniser_file)

        st.subheader("Perceptron Spam Classification")
        message = st.text_input("Enter your text here")

        def percep_pred(text):
            encoded_test_p = percep_token.texts_to_sequences([text])
            padded_test_p = sequence.pad_sequences(encoded_test_p, maxlen=10)
            predict_p = percep_model.predict(padded_test_p)
            return "Spam" if predict_p[0] else "Not Spam"

        if st.button("Check"):
            st.write(percep_pred(message))

    if clss_model == "Backpropagation":
        with open("backprop_model.pkl", "rb") as model_file:
            bp_model = pickle.load(model_file)
        with open("backrpop_tokeniser.pkl", "rb") as tokeniser_file:
            bp_tokeniser = pickle.load(tokeniser_file)

        st.subheader("Backpropagation Spam Classification")
        message = st.text_input("Enter your text here")

        def back_pred(text):
            encoded_test = bp_tokeniser.texts_to_sequences([text])
            padded_test = sequence.pad_sequences(encoded_test, maxlen=10)
            predict = bp_model.predict(padded_test)
            return "Spam" if predict[0] else "Not Spam"

        if st.button("Check"):
            st.write(back_pred(message))

    if clss_model == "DNN":
        model_path = os.path.join(os.getcwd(), "dnn_model.h5")
        dnn_model = load_model(model_path)
        with open("dnn_tokeniser.pkl", "rb") as tokeniser_file:
            dnn_tokeniser = pickle.load(tokeniser_file)

        st.subheader("DNN Spam Classification")
        message = st.text_input("Enter your text here")

        def dnn_pred(text):
            encoded_test = dnn_tokeniser.texts_to_sequences([text])
            padded_test = sequence.pad_sequences(encoded_test, maxlen=500)
            # Threshold the sigmoid output at 0.5 (assumes a single-unit output layer).
            predict = dnn_model.predict(padded_test)
            return "Spam" if predict[0][0] > 0.5 else "Not Spam"

        if st.button("Check"):
            st.write(dnn_pred(message))

    if clss_model == "LSTM":
        model_path = os.path.join(os.getcwd(), "lstm_model.h5")
        lstm_model = load_model(model_path)
        with open("lstm_tokeniser.pkl", "rb") as tokeniser_file:
            lstm_tokeniser = pickle.load(tokeniser_file)

        st.subheader("Movie Review Classification")
        review = st.text_area("Enter your review")

        def lstm_make_predictions(text, model):
            # Tokenise and pad the review, then threshold the sigmoid output.
            encoded = lstm_tokeniser.texts_to_sequences([text])
            padded = sequence.pad_sequences(encoded, maxlen=500)
            res = (model.predict(padded) > 0.5).astype("int32")
            return "Negative" if res[0][0] else "Positive"

        if st.button("Check"):
            st.write(lstm_make_predictions(review, lstm_model))