import os
import cv2
import numpy as np
import streamlit as st
from datetime import datetime
from tensorflow.keras.models import load_model
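
# Directory layout expected for the registered faces:
#   known_faces/
#       <person_name>/
#           <timestamp>.jpg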
KNOWN_FACES_DIR = "known_faces"
EMOTION_MODEL_PATH = "CNN_Model_acc_75.h5"
CASCADE_PATH = "haarcascade_frontalface_default.xml"
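
# All face crops are resized to this size before being saved and before LBPH prediction.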
IMG_SIZE = (200, 200)
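
# Load the emotion CNN, the Haar cascade face detector, and the LBPH face recognizer once at startup.
# Note: cv2.face.LBPHFaceRecognizer_create requires the opencv-contrib-python package.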
emotion_model = load_model(EMOTION_MODEL_PATH)
face_cascade = cv2.CascadeClassifier(CASCADE_PATH)
face_recognizer = cv2.face.LBPHFaceRecognizer_create()


def load_emotion_labels():
    """Return the emotion class names in the order of the model's output units."""
    return ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]


def train_recognizer():
    """Train the LBPH recognizer on all saved face images and return a {label: name} map."""
    os.makedirs(KNOWN_FACES_DIR, exist_ok=True)  # avoid a crash on the first run
    faces = []
    labels = []
    label_map = {}
    for idx, person_name in enumerate(os.listdir(KNOWN_FACES_DIR)):
        person_path = os.path.join(KNOWN_FACES_DIR, person_name)
        if not os.path.isdir(person_path):
            continue
        label_map[idx] = person_name
        for filename in os.listdir(person_path):
            filepath = os.path.join(person_path, filename)
            if filepath.lower().endswith(('.jpg', '.jpeg', '.png')):
                img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
                if img is not None:
                    faces.append(img)
                    labels.append(idx)
    if len(faces) == 0:
        st.warning("No valid training data found. Add faces first.")
        return {}
    face_recognizer.train(faces, np.array(labels))
    # Return {label index: person name} so predicted labels can be mapped back to names.
    return label_map


def detect_faces(image):
    """Convert a BGR image to grayscale and detect faces with the Haar cascade."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    return gray, faces


def detect_emotions(face_img):
    """Predict the dominant emotion for a grayscale face crop."""
    resized_face = cv2.resize(face_img, (48, 48))
    normalized_face = resized_face / 255.0
    # Add batch and channel dimensions: (48, 48) -> (1, 48, 48, 1), as the CNN expects.
    reshaped_face = np.expand_dims(normalized_face, axis=(0, -1))
    emotion_probabilities = emotion_model.predict(reshaped_face)
    emotion_idx = np.argmax(emotion_probabilities)
    return load_emotion_labels()[emotion_idx]


st.title("Face Recognition and Emotion Detection")
st.sidebar.title("Options")
option = st.sidebar.selectbox("Choose an action", ["Home", "Register New Face", "Recognize Faces"])
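
# Train the recognizer up front so the "Home" and "Recognize Faces" views have a label map ready;
# registration retrains after new images are saved.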
if option != "Register New Face":
    label_map = train_recognizer()

if option == "Home":
    st.write("Use the sidebar to register new faces or recognize them.")
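
# Register New Face: crop detected faces from a camera frame or uploaded images
# and store them under known_faces/<person_name>/.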
elif option == "Register New Face":
    person_name = st.text_input("Enter the person's name")
    capture_mode = st.radio("Select input method", ["Use Camera", "Upload Image(s)"])

    if person_name and st.button("Register Face"):
        person_dir = os.path.join(KNOWN_FACES_DIR, person_name)
        os.makedirs(person_dir, exist_ok=True)
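
        # Option 1: capture a single frame from the default webcam.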
        if capture_mode == "Use Camera":
            st.warning("Ensure you are running this locally to access the camera.")

            cap = cv2.VideoCapture(0)
            if not cap.isOpened():
                st.error("Could not access the camera. Make sure it's connected and permissions are granted.")
            else:
                ret, frame = cap.read()
                if ret:
                    st.image(frame, channels="BGR")
                    # Crop, resize, and save every detected face, then retrain the recognizer.
                    gray, faces = detect_faces(frame)
                    for (x, y, w, h) in faces:
                        face_img = cv2.resize(gray[y:y+h, x:x+w], IMG_SIZE)
                        timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
                        cv2.imwrite(os.path.join(person_dir, f"{timestamp}.jpg"), face_img)
                    st.success(f"Faces registered successfully for {person_name}!")
                    label_map = train_recognizer()
                cap.release()
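
        # Option 2: register faces from one or more uploaded images.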
        elif capture_mode == "Upload Image(s)":
            uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
            if uploaded_files:
                for uploaded_file in uploaded_files:
                    # Decode the uploaded bytes into a BGR image OpenCV can work with.
                    img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
                    gray, faces = detect_faces(img)
                    for (x, y, w, h) in faces:
                        face_img = gray[y:y+h, x:x+w]
                        resized_img = cv2.resize(face_img, IMG_SIZE)
                        # Microseconds in the filename avoid overwrites when several faces are saved in the same second.
                        timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
                        filepath = os.path.join(person_dir, f"{timestamp}.jpg")
                        cv2.imwrite(filepath, resized_img)
                st.success(f"Faces registered successfully for {person_name}!")
                label_map = train_recognizer()
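
# Recognize Faces: detect faces in an uploaded image, identify each one with LBPH,
# and predict its emotion with the CNN.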
elif option == "Recognize Faces":
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if uploaded_file:
        img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
        gray, faces = detect_faces(img)
        for (x, y, w, h) in faces:
            face_img = gray[y:y+h, x:x+w]
            resized_img = cv2.resize(face_img, IMG_SIZE)
            # LBPH returns a label index and a distance; lower distance means a closer match.
            label, confidence = face_recognizer.predict(resized_img)
            name = label_map.get(label, "Unknown")
            emotion = detect_emotions(face_img)
            # Annotate the color image with the recognized name and predicted emotion.
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            cv2.putText(img, f"{name}, {emotion}", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        st.image(img, channels="BGR")