# Import the required libraries.
import numpy as np
import cv2
from keras.models import load_model
import streamlit as st
from tensorflow import keras
from tensorflow.keras.preprocessing.image import img_to_array
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode

# Emotion classes predicted by the model.
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Load the trained emotion-classification model and its weights.
classifier = load_model('model_78.h5')
classifier.load_weights("model_weights_78.h5")

# Load the Haar cascade used for face detection.
try:
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
except Exception:
    st.write("Error loading cascade classifiers")


class VideoTransformer(VideoTransformerBase):
    def transform(self, frame):
        img = frame.to_ndarray(format="bgr24")

        # Detect faces on the grayscale frame.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            image=img_gray, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            # Draw a bounding box around the detected face.
            cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h),
                          color=(0, 255, 255), thickness=2)

            # Crop the face region and resize it to the 48x48 input the model expects.
            roi_gray = img_gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)

            if np.sum([roi_gray]) != 0:
                # Normalize pixel values and add a batch dimension.
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)

                # Predict the emotion and label the face with it.
                prediction = classifier.predict(roi)[0]
                maxindex = int(np.argmax(prediction))
                output = emotion_labels[maxindex]
                label_position = (x, y - 10)
                cv2.putText(img, output, label_position,
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return img


def main():
    # Face Analysis Application #
    st.title("Real Time Face Emotion Detection Application 😠🤮😨😀😐😔😮")
    activities = ["Home", "Live Face Emotion Detection"]
    choice = st.sidebar.selectbox("Select Activity", activities)

    # Homepage.
    if choice == "Home":
        html_temp_home1 = """