import os
import tempfile

import cv2
import face_recognition
import numpy as np
import streamlit as st
from keras.models import load_model
from PIL import Image

st.markdown("<h1 style='text-align: center;'>Emotion & Face Recognition</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

KNOWN_FACES_DIR = "known_faces"


@st.cache_resource
def load_emotion_model():
    return load_model("CNN_Model_acc_75.h5")


emotion_model = load_emotion_model()
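# Note: the cached loader above means the .h5 model is read from disk once
# per server process, not on every Streamlit rerun.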

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
img_shape = 48
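# 48x48 follows the FER-2013 input convention; img_shape is assumed to match
# the size this CNN was trained on.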

known_faces = {"names": [], "encodings": []}


def load_faces_from_folder(folder_path):
    """Load known faces from a folder, using filenames as names."""
    for filename in os.listdir(folder_path):
        if filename.lower().endswith(('.jpg', '.jpeg', '.png')):
            name = os.path.splitext(filename)[0]
            image_path = os.path.join(folder_path, filename)

            image = face_recognition.load_image_file(image_path)
            face_encodings = face_recognition.face_encodings(image)

            if face_encodings:
                known_faces["names"].append(name)
                known_faces["encodings"].append(face_encodings[0])
                print(f"Loaded face for {name}")
            else:
                print(f"No face detected in {filename}")


load_faces_from_folder(KNOWN_FACES_DIR)
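# Labels shown in the feed come from the image filenames, e.g.
# known_faces/alice.jpg is recognized as "alice".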


def recognize_face(unknown_face_encoding):
    """Compare an unknown face with the known faces and return the closest match."""
    if not known_faces["encodings"]:
        return "Unknown"
    # face_distance gives the distance to every known encoding; pick the
    # closest one and accept it only within the library's usual 0.6 tolerance.
    distances = face_recognition.face_distance(known_faces["encodings"], unknown_face_encoding)
    best_match = np.argmin(distances)
    if distances[best_match] <= 0.6:
        return known_faces["names"][best_match]
    return "Unknown"


def detect_emotion(face_image):
    """Predict the emotion of a face using the emotion detection model."""
    # The CNN is assumed to take a 48x48, 3-channel face scaled to [0, 1].
    face_resized = cv2.resize(face_image, (img_shape, img_shape))
    face_resized = np.expand_dims(face_resized, axis=0) / 255.0
    predictions = emotion_model.predict(face_resized, verbose=0)
    return emotion_labels[np.argmax(predictions)]


def process_frame_with_recognition_and_emotion(frame):
    """Detect faces, recognize names, and detect emotions in the frame."""
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    for (x, y, w, h) in faces:
        # Reuse the Haar detection as the face location, in the
        # (top, right, bottom, left) order face_recognition expects;
        # encoding a tight crop on its own often fails to re-detect the face.
        face_encodings = face_recognition.face_encodings(rgb_frame, [(y, x + w, y + h, x)])

        if face_encodings:
            name = recognize_face(face_encodings[0])
        else:
            name = "Unknown"

        emotion = detect_emotion(frame[y:y+h, x:x+w])

        display_text = f"{name} is Feeling {emotion}"
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, display_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    return frame


def video_feed(video_source):
    """Display video feed with face recognition and emotion detection."""
    frame_placeholder = st.empty()

    while True:
        ret, frame = video_source.read()
        if not ret:
            break

        frame = process_frame_with_recognition_and_emotion(frame)
        frame_placeholder.image(frame, channels="BGR", use_column_width=True)

    video_source.release()
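# Running face_recognition on every frame is CPU-heavy; if playback lags, a
# common mitigation is to recompute encodings only every Nth frame.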


upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])

if upload_choice == "Camera":
    video_source = cv2.VideoCapture(0)
    video_feed(video_source)

elif upload_choice == "Upload Video":
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
    if uploaded_video:
        # OpenCV cannot read from an in-memory upload, so spill it to a
        # temporary file first, keeping the original extension.
        suffix = os.path.splitext(uploaded_video.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tfile:
            tfile.write(uploaded_video.read())
        video_source = cv2.VideoCapture(tfile.name)
        video_feed(video_source)

elif upload_choice == "Upload Image":
    uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
    if uploaded_image:
        # PIL gives RGB; the processing pipeline expects BGR like OpenCV.
        image = Image.open(uploaded_image).convert("RGB")
        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        frame = process_frame_with_recognition_and_emotion(frame)
        st.image(frame, channels="BGR", caption="Processed Image", use_column_width=True)