# app.py — Emotion Detection with Face Recognition (Streamlit)
# Source: LovnishVerma, commit 4bc696c (6.29 kB)
import sqlite3
import streamlit as st
import cv2
import numpy as np
import time
import os
from keras.models import load_model
from PIL import Image
import tempfile
# Page header: larger title rendered as centered HTML (hence unsafe_allow_html).
st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
# Smaller subtitle listing the six emotion classes the model predicts.
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
# Database setup: SQLite file created in the app's working directory.
DATABASE_NAME = "emotion_recognition.db"
def init_db(db_path=None):
    """Create the SQLite schema if it does not exist yet.

    Tables:
        registered_faces: id, name, image (encoded image bytes as BLOB).
        attendance_log:   id, name, emotion, timestamp (defaults to now).

    Args:
        db_path: Optional path to the SQLite file; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS registered_faces (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                image BLOB NOT NULL
            )
        ''')
        conn.execute('''
            CREATE TABLE IF NOT EXISTS attendance_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                emotion TEXT NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
    finally:
        conn.close()  # close even if schema creation fails
# Ensure the schema exists before any UI interaction touches the database.
init_db()
def register_face(name, image, db_path=None):
    """Insert one face record into registered_faces.

    Args:
        name: Person's display name.
        image: Encoded image bytes (stored as a BLOB).
        db_path: Optional path to the SQLite file; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        conn.execute("INSERT INTO registered_faces (name, image) VALUES (?, ?)", (name, image))
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
def fetch_registered_faces(db_path=None):
    """Return [(id, name), ...] for every registered face.

    Args:
        db_path: Optional path to the SQLite file; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        return conn.execute("SELECT id, name FROM registered_faces").fetchall()
    finally:
        conn.close()  # close even if the query fails
def log_attendance(name, emotion, db_path=None):
    """Append one detection event; timestamp defaults to CURRENT_TIMESTAMP in the schema.

    Args:
        name: Recognized person's name (or "Unknown").
        emotion: Predicted emotion label.
        db_path: Optional path to the SQLite file; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        conn.execute("INSERT INTO attendance_log (name, emotion) VALUES (?, ?)", (name, emotion))
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
def fetch_recent_activity(db_path=None):
    """Return the 10 most recent (name, emotion, timestamp) rows, newest first.

    Args:
        db_path: Optional path to the SQLite file; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        return conn.execute(
            "SELECT name, emotion, timestamp FROM attendance_log ORDER BY timestamp DESC LIMIT 10"
        ).fetchall()
    finally:
        conn.close()  # close even if the query fails
# Load the emotion model once per server process.
@st.cache_resource
def load_emotion_model():
    """Load and cache the pre-trained Keras CNN emotion classifier.

    Cached with st.cache_resource so the .h5 file is only read once across
    Streamlit reruns. Expects CNN_Model_acc_75.h5 in the working directory.
    """
    model = load_model('CNN_Model_acc_75.h5')
    return model

model = load_emotion_model()
# Emotion labels — order must match the CNN's output layer indices
# (np.argmax over predictions is used as an index into this list).
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# Sidebar options: pick which page of the app to show.
sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Registered Faces", "Recent Activity"])
if sidebar_choice == "Register New Face":
    # --- Registration page: store a name + uploaded face image in SQLite ---
    st.header("Register New Face")
    name = st.text_input("Enter Name")
    uploaded_image = st.file_uploader("Upload Face Image", type=["png", "jpg", "jpeg"])
    if name and uploaded_image:
        image = np.array(Image.open(uploaded_image))
        # Re-encode as JPEG so the image can be stored compactly as a BLOB.
        _, buffer = cv2.imencode('.jpg', image)
        register_face(name, buffer.tobytes())
        st.success(f"Successfully registered {name}!")
elif sidebar_choice == "View Registered Faces":
    # --- Listing page: show id/name of every registered face ---
    st.header("Registered Faces")
    faces = fetch_registered_faces()
    if faces:
        for face_id, name in faces:
            st.write(f"ID: {face_id}, Name: {name}")
    else:
        st.write("No faces registered yet.")
elif sidebar_choice == "Recent Activity":
    # --- Log page: show the 10 most recent attendance entries ---
    st.header("Recent Activity (Attendance Log)")
    logs = fetch_recent_activity()
    if logs:
        for name, emotion, timestamp in logs:
            st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
    else:
        st.write("No recent activity found.")
else:  # Emotion Detection — default page: run the CNN on image/video/camera input
    st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
    upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
def process_frame(frame):
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
result_text = ""
for (x, y, w, h) in faces:
roi_gray = gray_frame[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
face_roi = cv2.resize(roi_color, (48, 48))
face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) / 255.0
face_roi = np.expand_dims(face_roi, axis=0)
predictions = model.predict(face_roi)
emotion = emotion_labels[np.argmax(predictions[0])]
label = "Unknown" # Placeholder for face recognition (add later)
log_attendance(label, emotion)
result_text = f"{label} is feeling {emotion}"
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.putText(frame, result_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
return frame, result_text
if upload_choice == "Upload Image":
uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
if uploaded_image:
image = np.array(Image.open(uploaded_image))
frame, result_text = process_frame(image)
st.image(frame, caption='Processed Image', use_column_width=True)
st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
elif upload_choice == "Upload Video":
uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mkv"])
if uploaded_video:
with tempfile.NamedTemporaryFile(delete=False) as tfile:
tfile.write(uploaded_video.read())
video_source = cv2.VideoCapture(tfile.name)
while True:
ret, frame = video_source.read()
if not ret:
break
frame, result_text = process_frame(frame)
st.image(frame, channels="BGR", use_column_width=True)
elif upload_choice == "Camera":
image = st.camera_input("Take a picture")
if image:
frame = np.array(Image.open(image))
frame, result_text = process_frame(frame)
st.image(frame, caption='Processed Image', use_column_width=True)
st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)