Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ from PIL import Image
 import sqlite3
 from huggingface_hub import HfApi
 from datetime import datetime
-import
+from sklearn.preprocessing import LabelEncoder
 
 # Constants
 KNOWN_FACES_DIR = "known_faces" # Directory to save user images
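The new scikit-learn import supports the face-recognition code added below: LabelEncoder maps student-name strings to the integer ids that OpenCV's LBPH recognizer trains on, and maps predictions back to names. Note also that the cv2.face module used below ships in the opencv-contrib-python package, not the base opencv-python wheel. A minimal round-trip, just to illustrate the encoder's role here:

from sklearn.preprocessing import LabelEncoder

encoder = LabelEncoder()
ids = encoder.fit_transform(["alice", "bob", "alice"])  # -> array([0, 1, 0])
name = encoder.inverse_transform([1])[0]                # -> 'bob'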
@@ -136,18 +136,49 @@ if st.checkbox("Show registered students"):
         st.write(f"**Name:** {name}, **Roll No:** {roll_no}, **Timestamp:** {timestamp}")
         st.image(image_path, caption=f"{name} ({roll_no})", use_column_width=True)
 
+# Initialize OpenCV's face detector (Haar Cascade)
+face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+# Initialize the LBPH face recognizer and the label encoder (module-level so detection can reuse the fitted encoder)
+face_recognizer = cv2.face.LBPHFaceRecognizer_create()
+label_encoder = LabelEncoder()
+
+# Function to load known faces and train the face recognizer
+def train_face_recognizer():
+    faces = []
+    labels = []
+
+    # Load known faces
+    for filename in os.listdir(KNOWN_FACES_DIR):
+        if filename.endswith(".jpg"):
+            image_path = os.path.join(KNOWN_FACES_DIR, filename)
+            image = cv2.imread(image_path)
+            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+            # Detect face(s)
+            faces_detected = face_cascade.detectMultiScale(gray_image, 1.3, 5)
+            for (x, y, w, h) in faces_detected:
+                face = gray_image[y:y+h, x:x+w]
+                faces.append(face)
+                labels.append(filename.split('_')[0])  # Assuming the name is the first part of the filename
+
+    labels = label_encoder.fit_transform(labels)
+    face_recognizer.train(faces, np.array(labels))
+    st.success("Face recognizer trained successfully!")
+
+train_face_recognizer()
+
 # Face and Emotion Detection Function
 def detect_faces_and_emotions(image):
-    # Convert the image to RGB
-    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    # Convert the image to grayscale for face detection
+    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
     # Detect faces using OpenCV's Haar Cascade
-
-    faces = face_cascade.detectMultiScale(rgb_image, scaleFactor=1.3, minNeighbors=5)
+    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)
 
-    # If faces are detected, predict emotions
+    # If faces are detected, predict emotions and recognize faces
     for (x, y, w, h) in faces:
-        face = rgb_image[y:y+h, x:x+w]
+        face = gray_image[y:y+h, x:x+w]
         resized_face = cv2.resize(face, (48, 48)) # Resize face to 48x48
         rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_BGR2RGB)
         normalized_face = rgb_face / 255.0
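One catch this hunk leaves in place: the unchanged lines just above still call cv2.cvtColor(resized_face, cv2.COLOR_BGR2RGB), but resized_face is now a single-channel grayscale crop, and OpenCV raises an error when asked for a BGR-to-RGB conversion on a one-channel image. Assuming the emotion model expects a 3-channel 48x48 input (its input shape is not visible in this diff), the preprocessing would need an explicit channel conversion, roughly:

face = gray_image[y:y+h, x:x+w]                            # single-channel crop
resized_face = cv2.resize(face, (48, 48))                  # emotion model input size
rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)  # 1 -> 3 channels
normalized_face = rgb_face / 255.0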
@@ -156,9 +187,14 @@ def detect_faces_and_emotions(image):
         # Predict the emotion
         emotion_prediction = emotion_model.predict(reshaped_face)
         emotion_label = np.argmax(emotion_prediction)
-        return EMOTION_LABELS[emotion_label]
+
+        # Recognize the face
+        label, confidence = face_recognizer.predict(face)
+        recognized_label = label_encoder.inverse_transform([label])[0]
+
+        return EMOTION_LABELS[emotion_label], recognized_label
 
-    return None
+    return None, None
 
 # UI for Emotion Detection (Only using webcam now)
 if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection", "View Attendance"]) == "Face Recognition and Emotion Detection":
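Two caveats on this hunk. First, the function returns from inside the loop, so only the first detected face is reported even when several are in frame. Second, LBPH's confidence value is a distance (lower means a closer match); the code receives it but never checks it, so any detected face gets labeled as the nearest enrolled student. A sketch of a rejection threshold; the cutoff of 80 is a placeholder to tune per dataset, not a value from this app:

# Recognize the face, rejecting weak matches
label, confidence = face_recognizer.predict(face)
if confidence < 80:  # placeholder threshold; LBPH distance, lower is better
    recognized_label = label_encoder.inverse_transform([label])[0]
else:
    recognized_label = "Unknown"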
@@ -170,9 +206,11 @@ if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection", "View Attendance"]) == "Face Recognition and Emotion Detection":
         img = Image.open(camera_image)
         img_array = np.array(img)
 
-        # Detect emotion in the captured image
-        emotion_label = detect_faces_and_emotions(img_array)
+        # Detect emotion and recognize face in the captured image
+        emotion_label, recognized_label = detect_faces_and_emotions(img_array)
+
         if emotion_label:
             st.success(f"Emotion Detected: {emotion_label}")
+            st.success(f"Face Recognized as: {recognized_label}")
         else:
             st.warning("No face detected.")
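Finally, train_face_recognizer() is called at module scope, so Streamlit re-runs it (re-reading and re-detecting every saved face) on each widget interaction. One way to train once per process, sketched under the assumption of Streamlit 1.18+ for st.cache_resource, with load_known_faces() as a hypothetical helper wrapping the directory walk and Haar cropping from the training function above:

import cv2
import numpy as np
import streamlit as st
from sklearn.preprocessing import LabelEncoder

@st.cache_resource
def get_recognizer():
    # load_known_faces() is hypothetical: the same os.listdir walk and Haar
    # cropping as train_face_recognizer(), returning (face_crops, names)
    faces, names = load_known_faces()
    encoder = LabelEncoder()
    ids = encoder.fit_transform(names)
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(faces, np.array(ids))
    return recognizer, encoder

face_recognizer, label_encoder = get_recognizer()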