File size: 5,006 Bytes
9cd5b33
 
c7b74ee
 
 
 
 
9cd5b33
c7b74ee
9cd5b33
 
 
 
 
 
 
 
c7b74ee
 
9cd5b33
c7b74ee
9cd5b33
c7b74ee
9cd5b33
 
 
 
c7b74ee
9cd5b33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c7b74ee
9cd5b33
c7b74ee
 
 
9cd5b33
 
 
c7b74ee
9cd5b33
 
 
 
c7b74ee
9cd5b33
 
 
 
 
c7b74ee
9cd5b33
c7b74ee
 
 
 
9cd5b33
 
 
 
c7b74ee
 
 
 
 
 
9cd5b33
c7b74ee
 
9cd5b33
 
c7b74ee
 
9cd5b33
c7b74ee
 
 
 
 
 
 
 
 
 
 
9cd5b33
c7b74ee
 
 
9cd5b33
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import os
import tempfile
import time

import cv2
import face_recognition
import numpy as np
import streamlit as st
from keras.models import load_model
from PIL import Image

# Streamlit UI Setup: centered page title plus the list of emotion
# classes the CNN can predict.
st.markdown("<h1 style='text-align: center;'>Emotion & Face Recognition</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

# Folder of reference images; each image's file name (without extension)
# is used as that person's display name.
KNOWN_FACES_DIR = "known_faces"

# Load emotion detection model
@st.cache_resource
def load_emotion_model():
    """Load the pre-trained emotion CNN from disk.

    Wrapped in ``st.cache_resource`` so the model is loaded once and
    reused across Streamlit script reruns.
    """
    return load_model("CNN_Model_acc_75.h5")

emotion_model = load_emotion_model()

# Face detection model: OpenCV's bundled frontal-face Haar cascade.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
# Label order is assumed to match the CNN's output vector (indexed by argmax).
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# Side length (pixels) of the square crop fed to the emotion model.
img_shape = 48

# Gallery of known faces: parallel lists, names[i] corresponds to encodings[i].
known_faces = {"names": [], "encodings": []}

def load_faces_from_folder(folder_path):
    """
    Load known face encodings from a folder of images.

    Each image's file name (without extension) becomes the person's
    name. Results are appended to the module-level ``known_faces``
    dict ("names" and "encodings" parallel lists).

    Args:
        folder_path: Directory containing .jpg/.jpeg/.png face images.
    """
    for filename in os.listdir(folder_path):
        # Case-insensitive check so e.g. "Bob.JPG" is not silently skipped.
        if not filename.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        name = os.path.splitext(filename)[0]
        image_path = os.path.join(folder_path, filename)

        # Load and encode the image
        image = face_recognition.load_image_file(image_path)
        face_encodings = face_recognition.face_encodings(image)

        if face_encodings:  # Ensure at least one face was found
            known_faces["names"].append(name)
            known_faces["encodings"].append(face_encodings[0])
            print(f"Loaded face for {name}")
        else:
            # Bug fix: original message was a placeholder-less f-string and
            # never reported which file failed.
            print(f"No face detected in {filename}")

# Populate the known_faces gallery once at import time.
load_faces_from_folder(KNOWN_FACES_DIR)

def recognize_face(unknown_face_encoding):
    """
    Return the name of the closest known face, or "Unknown".

    Computes the distance from the unknown encoding to every known
    encoding and picks the smallest; the match is accepted only if
    that distance is within the standard 0.6 tolerance.
    """
    # Guard: face_distance/argmin on an empty gallery would fail.
    if not known_faces["encodings"]:
        return "Unknown"
    distances = face_recognition.face_distance(known_faces["encodings"], unknown_face_encoding)
    best_index = int(np.argmin(distances))
    # Bug fix: the original returned the FIRST encoding within tolerance,
    # not the closest one, despite the docstring's promise.
    if distances[best_index] <= 0.6:
        return known_faces["names"][best_index]
    return "Unknown"

def detect_emotion(face_image):
    """
    Classify the emotion shown in a cropped face image.

    The crop is resized to the model's 48x48 input, scaled to [0, 1],
    and run through the CNN; the label with the highest score wins.
    NOTE(review): the crop is forwarded with its original channel
    layout — assumes the model was trained on that same format; confirm.
    """
    resized = cv2.resize(face_image, (img_shape, img_shape))
    batch = np.expand_dims(resized, axis=0) / 255.0  # add batch dim, normalize
    scores = emotion_model.predict(batch)
    return emotion_labels[int(np.argmax(scores))]

def process_frame_with_recognition_and_emotion(frame):
    """
    Annotate one BGR frame with each detected face's name and emotion.

    Faces are located with the Haar cascade on a grayscale copy; each
    detection is identified against the known-face gallery, its emotion
    classified, and the frame is drawn on in place before returning.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # face_recognition wants RGB

    detections = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )

    for (x, y, w, h) in detections:
        # Identify the person from the RGB crop of this detection.
        encodings = face_recognition.face_encodings(rgb[y:y + h, x:x + w])
        name = recognize_face(encodings[0]) if encodings else "Unknown"

        # Classify the emotion from the raw (BGR) crop.
        emotion = detect_emotion(frame[y:y + h, x:x + w])

        # Draw the bounding box and the combined label just above it.
        display_text = f"{name} is Feeling {emotion}"
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, display_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    return frame

def video_feed(video_source):
    """
    Stream annotated frames from *video_source* into the Streamlit page.

    Each frame is run through face recognition + emotion detection and
    pushed into a single placeholder so the image updates in place.
    The capture is always released when the stream ends.

    Args:
        video_source: An opened ``cv2.VideoCapture``.
    """
    frame_placeholder = st.empty()  # Placeholder for updating frames

    try:
        while True:
            ret, frame = video_source.read()
            if not ret:  # end of stream / camera disconnected
                break

            frame = process_frame_with_recognition_and_emotion(frame)
            frame_placeholder.image(frame, channels="BGR", use_column_width=True)
    finally:
        # Bug fix: the capture was never released, leaking the camera /
        # file handle after the loop ended.
        video_source.release()

# Sidebar options: pick where the frames come from.
upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])

if upload_choice == "Camera":
    video_source = cv2.VideoCapture(0)  # Access webcam
    video_feed(video_source)

elif upload_choice == "Upload Video":
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
    if uploaded_video:
        # OpenCV needs a real file path, so spill the upload to a temp file.
        with tempfile.NamedTemporaryFile(delete=False) as tfile:
            tfile.write(uploaded_video.read())
            video_source = cv2.VideoCapture(tfile.name)
            video_feed(video_source)

elif upload_choice == "Upload Image":
    uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
    if uploaded_image:
        image = Image.open(uploaded_image)
        # Bug fix: the pipeline expects a 3-channel BGR frame (it converts
        # BGR->GRAY/RGB internally), but PIL yields RGB and PNG uploads may
        # carry an alpha channel. Convert to RGB first (drops alpha), then
        # swap to BGR so colors and crops are handled consistently.
        frame = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
        frame = process_frame_with_recognition_and_emotion(frame)
        st.image(frame, caption="Processed Image", channels="BGR", use_column_width=True)