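"""Streamlit app for face recognition and emotion detection.

Registers faces via webcam or uploaded images, recognizes them with an LBPH
face recognizer, classifies emotions with a pre-trained CNN, and logs each
detection to an SQLite attendance table.
"""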
import sqlite3
import streamlit as st
import cv2
import numpy as np
import os
from keras.models import load_model
from datetime import datetime
from PIL import Image
# Database setup
DATABASE_NAME = "emotion_recognition.db"
KNOWN_FACES_DIR = "known_faces"

os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
def init_db():
    """Create the attendance_log table if it does not already exist."""
    conn = sqlite3.connect(DATABASE_NAME)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS attendance_log (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            emotion TEXT NOT NULL,
            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
        )
    ''')
    conn.commit()
    conn.close()
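# Create the log table once at startup.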
init_db()
def log_attendance(name, emotion):
    """Insert one (name, emotion) row; the timestamp defaults to CURRENT_TIMESTAMP."""
    conn = sqlite3.connect(DATABASE_NAME)
    cursor = conn.cursor()
    cursor.execute("INSERT INTO attendance_log (name, emotion) VALUES (?, ?)", (name, emotion))
    conn.commit()
    conn.close()
def fetch_recent_activity():
    """Return the ten most recent (name, emotion, timestamp) rows."""
    conn = sqlite3.connect(DATABASE_NAME)
    cursor = conn.cursor()
    cursor.execute("SELECT name, emotion, timestamp FROM attendance_log ORDER BY timestamp DESC LIMIT 10")
    rows = cursor.fetchall()
    conn.close()
    return rows
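# NOTE: SQLite's CURRENT_TIMESTAMP stores UTC, so displayed times may differ
# from local time.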
# Load the pre-trained emotion detection model (cached across Streamlit reruns)
@st.cache_resource
def load_emotion_model():
    return load_model('CNN_Model_acc_75.h5')
emotion_model = load_emotion_model()
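# NOTE: this label order is assumed to match the class order the CNN was
# trained with; a different training order would mislabel the predictions.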
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# Initialize LBPH face recognizer
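# cv2.face is provided by the opencv-contrib-python package, not the base
# opencv-python wheel.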
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
def train_recognizer():
    """Train the LBPH recognizer on all saved face crops; return an id-to-name map."""
    faces = []
    labels = []
    for name in os.listdir(KNOWN_FACES_DIR):
        person_dir = os.path.join(KNOWN_FACES_DIR, name)
        if not os.path.isdir(person_dir):
            continue
        for filename in os.listdir(person_dir):
            filepath = os.path.join(person_dir, filename)
            image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
            if image is None:
                continue  # skip unreadable files
            faces.append(image)
            labels.append(name)
    label_ids = {name: idx for idx, name in enumerate(sorted(set(labels)))}
    label_ids_rev = {idx: name for name, idx in label_ids.items()}
    if not faces:
        return label_ids_rev  # nothing registered yet; training would raise an error
    face_recognizer.train(faces, np.array([label_ids[label] for label in labels]))
    return label_ids_rev
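# Train once at startup on whatever faces are already registered (safe when empty).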
label_ids_rev = train_recognizer()
# Sidebar options
sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Recent Activity"])
# Main App Logic
if sidebar_choice == "Register New Face":
    st.header("Register New Face")
    name = st.text_input("Enter Name")
    capture_button = st.button("Capture Face via Camera")

    if capture_button and name:
        cap = cv2.VideoCapture(0)
        st.write("Capturing face... Look into the camera.")
        captured_faces = []
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        while len(captured_faces) < 5:
            ret, frame = cap.read()
            if not ret:
                st.error("Error capturing video")
                break
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
            for (x, y, w, h) in faces:
                face_roi = gray_frame[y:y + h, x:x + w]
                captured_faces.append(face_roi)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # NOTE: cv2.imshow needs a local display; it will not work on a headless host.
            cv2.imshow("Face Registration", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()

        # Save the captured grayscale crops under known_faces/<name>/
        person_dir = os.path.join(KNOWN_FACES_DIR, name)
        os.makedirs(person_dir, exist_ok=True)
        for i, face in enumerate(captured_faces):
            cv2.imwrite(os.path.join(person_dir, f"{name}_{i}.jpg"), face)

        # Retrain so the new person is recognized immediately
        label_ids_rev = train_recognizer()
        st.success(f"{name} has been registered successfully!")
elif sidebar_choice == "View Recent Activity":
    st.header("Recent Activity")
    logs = fetch_recent_activity()
    if logs:
        for name, emotion, timestamp in logs:
            st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
    else:
        st.write("No recent activity found.")
else:  # Emotion Detection
    st.header("Emotion Detection with Face Recognition")
    mode = st.radio("Choose mode", ["Image", "Camera"])
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    def process_frame(frame):
        """Detect faces, identify each with LBPH, classify its emotion, and log it."""
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
        result_text = ""
        for (x, y, w, h) in faces:
            # Face recognition on the grayscale crop
            face_roi = gray_frame[y:y + h, x:x + w]
            face_resized = cv2.resize(face_roi, (150, 150))
            label_id, confidence = face_recognizer.predict(face_resized)
            label = label_ids_rev.get(label_id, "Unknown")

            # Emotion detection on the colour crop: convert to RGB while still uint8,
            # then normalize to float32 (cv2.cvtColor does not accept float64 input)
            face_color = cv2.resize(frame[y:y + h, x:x + w], (48, 48))
            face_color = cv2.cvtColor(face_color, cv2.COLOR_BGR2RGB).astype("float32") / 255.0
            face_color = np.expand_dims(face_color, axis=0)
            emotion_prediction = emotion_model.predict(face_color)
            emotion = emotion_labels[np.argmax(emotion_prediction[0])]

            # Log attendance
            log_attendance(label, emotion)

            # Annotate the frame
            result_text = f"{label} is feeling {emotion}"
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return frame, result_text
    if mode == "Image":
        uploaded_image = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
        name_for_image = st.text_input("Enter Name (optional, for registration):")
        register_image_button = st.button("Register Image")

        if uploaded_image:
            # PIL loads RGB (convert() also flattens PNG alpha), so use RGB2GRAY here
            image = np.array(Image.open(uploaded_image).convert("RGB"))
            gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))

            if len(faces) > 0:
                for i, (x, y, w, h) in enumerate(faces):
                    face_roi = gray_image[y:y + h, x:x + w]
                    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)  # annotate detected face
                    # Save each face crop if the user provided a name; the index keeps
                    # filenames unique when several faces share one timestamp
                    if register_image_button and name_for_image:
                        person_dir = os.path.join(KNOWN_FACES_DIR, name_for_image)
                        os.makedirs(person_dir, exist_ok=True)
                        face_filename = os.path.join(person_dir, f"{name_for_image}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.jpg")
                        cv2.imwrite(face_filename, face_roi)
                if register_image_button and name_for_image:
                    st.success(f"Face for {name_for_image} has been saved successfully!")
                    label_ids_rev = train_recognizer()  # retrain once after adding the new faces
            else:
                st.warning("No face detected in the uploaded image. Please try another image.")

            st.image(image, caption="Processed Image with Face Annotations")
    elif mode == "Camera":
        # NOTE: this loop reads the local webcam and opens an OpenCV window, so it
        # only works when the app runs on a machine with a camera and display.
        cap = cv2.VideoCapture(0)
        st.write("Press 'q' to exit.")
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame, result_text = process_frame(frame)
            cv2.imshow("Emotion Detection", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()