File size: 6,291 Bytes
4bc696c
2d75c41
c7b74ee
 
 
2d75c41
c7b74ee
 
2d75c41
c7b74ee
2d75c41
 
 
 
9cd5b33
 
4bc696c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9cd5b33
2d75c41
c7b74ee
 
4bc696c
2d75c41
c7b74ee
2d75c41
c7b74ee
2d75c41
9cd5b33
c7b74ee
4bc696c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
import sqlite3
import streamlit as st
import cv2
import numpy as np
import time
import os
from keras.models import load_model
from PIL import Image
import tempfile

# Larger title
# Rendered as raw HTML so we can center it; requires unsafe_allow_html.
st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)

# Smaller subtitle
# Lists the six emotion classes the CNN below can predict.
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

# Database setup
# SQLite file created in the current working directory on first run.
DATABASE_NAME = "emotion_recognition.db"

def init_db(db_path=None):
    """Create the SQLite tables used by the app if they do not already exist.

    Args:
        db_path: Optional path to the SQLite database file. Defaults to the
            module-level DATABASE_NAME, so existing callers are unaffected.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        cursor = conn.cursor()
        # Registered faces: display name plus the encoded image bytes (BLOB).
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS registered_faces (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                image BLOB NOT NULL
            )
        ''')
        # Attendance log: one row per detection, timestamped by SQLite.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS attendance_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                emotion TEXT NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
    finally:
        # Close even if table creation fails, so the handle never leaks.
        conn.close()

# Create the tables up front so every sidebar page can assume they exist.
init_db()

def register_face(name, image, db_path=None):
    """Insert one (name, image) row into registered_faces.

    Args:
        name: Display name for the person being registered.
        image: Encoded image bytes (stored as a BLOB).
        db_path: Optional database path; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        # Parameterized query: safe against SQL injection via the name field.
        conn.execute(
            "INSERT INTO registered_faces (name, image) VALUES (?, ?)",
            (name, image),
        )
        conn.commit()
    finally:
        # Guarantee the connection is released even if the insert fails.
        conn.close()

def fetch_registered_faces(db_path=None):
    """Return all registered faces as a list of (id, name) tuples.

    Args:
        db_path: Optional database path; defaults to DATABASE_NAME.

    Returns:
        List of (id, name) rows; empty list when nobody is registered.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        return conn.execute("SELECT id, name FROM registered_faces").fetchall()
    finally:
        # Release the connection even if the query raises.
        conn.close()

def log_attendance(name, emotion, db_path=None):
    """Append one attendance row; SQLite fills in the timestamp default.

    Args:
        name: Recognized person's name (currently always "Unknown").
        emotion: Predicted emotion label for this detection.
        db_path: Optional database path; defaults to DATABASE_NAME.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        conn.execute(
            "INSERT INTO attendance_log (name, emotion) VALUES (?, ?)",
            (name, emotion),
        )
        conn.commit()
    finally:
        # Guarantee the connection is released even if the insert fails.
        conn.close()

def fetch_recent_activity(db_path=None):
    """Return the 10 most recent attendance entries, newest first.

    Args:
        db_path: Optional database path; defaults to DATABASE_NAME.

    Returns:
        List of up to 10 (name, emotion, timestamp) rows ordered by
        timestamp descending.
    """
    conn = sqlite3.connect(db_path or DATABASE_NAME)
    try:
        return conn.execute(
            "SELECT name, emotion, timestamp FROM attendance_log "
            "ORDER BY timestamp DESC LIMIT 10"
        ).fetchall()
    finally:
        # Release the connection even if the query raises.
        conn.close()

# Load the emotion model
@st.cache_resource
def load_emotion_model(model_path='CNN_Model_acc_75.h5'):
    """Load and cache the Keras emotion-classification CNN.

    Args:
        model_path: Path to the saved .h5 model; the default preserves the
            originally hard-coded filename, so existing callers still work.

    Returns:
        The loaded Keras model. st.cache_resource keys the cache on the
        arguments, so each path is loaded at most once per session.
    """
    return load_model(model_path)

model = load_emotion_model()

# Emotion labels
# Index order must match the CNN's output layer (argmax index -> label).
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

# Sidebar options
# The selected page drives the top-level if/elif dispatch below.
sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Registered Faces", "Recent Activity"])

if sidebar_choice == "Register New Face":
    # Page: store a named face image in the database.
    st.header("Register New Face")
    name = st.text_input("Enter Name")
    uploaded_image = st.file_uploader("Upload Face Image", type=["png", "jpg", "jpeg"])
    if name and uploaded_image:
        # Normalize to 3-channel RGB first (PNG uploads may be RGBA/palette,
        # which would otherwise confuse the JPEG encoder).
        image = np.array(Image.open(uploaded_image).convert("RGB"))
        # PIL arrays are RGB but cv2.imencode expects BGR; convert so the
        # stored JPEG has correct colors instead of swapped red/blue.
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        ok, buffer = cv2.imencode('.jpg', bgr)
        # Don't silently ignore an encode failure (original dropped the flag).
        if ok:
            register_face(name, buffer.tobytes())
            st.success(f"Successfully registered {name}!")
        else:
            st.error("Could not encode the uploaded image; please try another file.")

elif sidebar_choice == "View Registered Faces":
    st.header("Registered Faces")
    faces = fetch_registered_faces()
    if faces:
        for face_id, name in faces:
            st.write(f"ID: {face_id}, Name: {name}")
    else:
        st.write("No faces registered yet.")

elif sidebar_choice == "Recent Activity":
    st.header("Recent Activity (Attendance Log)")
    logs = fetch_recent_activity()
    if logs:
        for name, emotion, timestamp in logs:
            st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
    else:
        st.write("No recent activity found.")

else:  # Emotion Detection
    st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")

    upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])

    def process_frame(frame):
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        result_text = ""
        for (x, y, w, h) in faces:
            roi_gray = gray_frame[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
            face_roi = cv2.resize(roi_color, (48, 48))
            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) / 255.0
            face_roi = np.expand_dims(face_roi, axis=0)

            predictions = model.predict(face_roi)
            emotion = emotion_labels[np.argmax(predictions[0])]

            label = "Unknown"  # Placeholder for face recognition (add later)
            log_attendance(label, emotion)

            result_text = f"{label} is feeling {emotion}"
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            cv2.putText(frame, result_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return frame, result_text

    if upload_choice == "Upload Image":
        uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
        if uploaded_image:
            image = np.array(Image.open(uploaded_image))
            frame, result_text = process_frame(image)
            st.image(frame, caption='Processed Image', use_column_width=True)
            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

    elif upload_choice == "Upload Video":
        uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mkv"])
        if uploaded_video:
            with tempfile.NamedTemporaryFile(delete=False) as tfile:
                tfile.write(uploaded_video.read())
                video_source = cv2.VideoCapture(tfile.name)
                while True:
                    ret, frame = video_source.read()
                    if not ret:
                        break
                    frame, result_text = process_frame(frame)
                    st.image(frame, channels="BGR", use_column_width=True)

    elif upload_choice == "Camera":
        image = st.camera_input("Take a picture")
        if image:
            frame = np.array(Image.open(image))
            frame, result_text = process_frame(frame)
            st.image(frame, caption='Processed Image', use_column_width=True)
            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)