import streamlit as st
from streamlit_webrtc import webrtc_streamer, VideoProcessorBase
import av
import cv2
import numpy as np
from datetime import datetime
from keras.models import load_model
import sqlite3
import os

# Database Initialization
DB_NAME = "emotion_detection.db"


def initialize_database():
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS face_data (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            emotion TEXT NOT NULL,
            timestamp TEXT NOT NULL
        )
    """)
    conn.commit()
    conn.close()


initialize_database()
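

# --- Illustrative sketch, not part of the original app ---
# The face_data table above is read by the "View Records" page, but nothing in
# this file writes to it. The helper below is one possible way a detection
# record could be inserted; the name log_emotion and the timestamp format are
# assumptions for illustration, not an existing API.
def log_emotion(name, emotion):
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO face_data (name, emotion, timestamp) VALUES (?, ?, ?)",
        (name, emotion, datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
    )
    conn.commit()
    conn.close()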

# Load emotion detection model
@st.cache_resource
def load_emotion_model():
    return load_model('CNN_Model_acc_75.h5')


emotion_model = load_emotion_model()
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

# Video processor for Streamlit WebRTC
class EmotionDetector(VideoProcessorBase):
    def __init__(self):
        self.model = emotion_model
        # Load the Haar cascade once instead of re-creating it for every frame
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
        )

    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48)
        )
        for (x, y, w, h) in faces:
            face = gray[y:y + h, x:x + w]
            face_resized = cv2.resize(face, (48, 48))
            face_normalized = face_resized / 255.0
            face_reshaped = np.reshape(face_normalized, (1, 48, 48, 1))
            prediction = self.model.predict(face_reshaped)
            emotion = emotion_labels[np.argmax(prediction[0])]
            # Draw bounding box and label
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(img, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        return av.VideoFrame.from_ndarray(img, format="bgr24")
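

# Illustrative note (not in the original file): each detection could be
# persisted by calling the sketch helper defined above from inside recv(),
# e.g. log_emotion("unknown", emotion). Writing a row for every frame would
# flood the table, so some rate limiting or de-duplication would be needed.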

# Sidebar menu
menu = st.sidebar.selectbox("Menu", ["Home", "View Records"])

if menu == "Home":
    st.title("Real-Time Emotion Detection")
    st.write("Using your camera for real-time emotion detection.")
    webrtc_streamer(
        key="emotion-detection",
        video_processor_factory=EmotionDetector,
        media_stream_constraints={"video": True, "audio": False},
    )
elif menu == "View Records":
    st.title("View Records")
    st.subheader("Recent Records")
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()
    cursor.execute("SELECT name, emotion, timestamp FROM face_data ORDER BY timestamp DESC LIMIT 5")
    records = cursor.fetchall()
    conn.close()
    for record in records:
        st.write(f"**Name**: {record[0]}, **Emotion**: {record[1]}, **Timestamp**: {record[2]}")