LovnishVerma committed on
Commit
01eea27
·
verified ·
1 Parent(s): eea58c2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -40
app.py CHANGED
@@ -5,7 +5,7 @@ import time
5
  import os
6
  from keras.models import load_model
7
  from PIL import Image
8
- import dlib
9
  import pymongo
10
  from datetime import datetime
11
  import tempfile
@@ -42,63 +42,52 @@ emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
42
  # Load known faces and names
43
  known_faces = []
44
  known_names = []
45
- face_recognizer = cv2.face.LBPHFaceRecognizer_create()
46
 
47
  def load_known_faces():
48
  folder_path = "known_faces" # Folder containing known face images
49
  for image_name in os.listdir(folder_path):
50
  if image_name.endswith(('.jpg', '.jpeg', '.png')):
51
  image_path = os.path.join(folder_path, image_name)
52
- image = cv2.imread(image_path)
53
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
54
- # Detect face in the image
55
- detector = dlib.get_frontal_face_detector()
56
- faces = detector(gray)
57
-
58
- for face in faces:
59
- x, y, w, h = (face.left(), face.top(), face.width(), face.height())
60
- roi_gray = gray[y:y+h, x:x+w]
61
- known_faces.append(roi_gray)
62
  known_names.append(image_name.split('.')[0]) # Assuming file name is the person's name
63
 
64
- # Train the recognizer with the known faces
65
- face_recognizer.train(known_faces, np.array([i for i in range(len(known_faces))]))
66
-
67
  load_known_faces()
68
 
69
- # Dlib face detector
70
- detector = dlib.get_frontal_face_detector()
71
-
72
  # Process a single frame
73
  def process_frame(frame):
74
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
75
- faces = detector(gray)
76
-
 
 
 
 
77
  result_text = "" # Initialize result text
78
 
79
- if len(faces) > 0:
80
- for face in faces:
81
- x, y, w, h = (face.left(), face.top(), face.width(), face.height())
82
- roi_color = frame[y:y+h, x:x+w]
83
- roi_gray = gray[y:y+h, x:x+w]
84
-
85
- # Apply histogram equalization for better feature extraction
86
- roi_gray = cv2.equalizeHist(roi_gray)
87
-
88
- face_roi = cv2.resize(roi_color, (48, 48))
 
 
 
89
  face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
 
90
  face_roi = np.expand_dims(face_roi, axis=0) / 255.0 # Normalize
91
 
92
- # Emotion detection
93
  predictions = model.predict(face_roi)
94
  emotion = emotion_labels[np.argmax(predictions[0])]
95
 
96
- # Face recognition
97
- name = "Unknown"
98
- label, confidence = face_recognizer.predict(roi_gray)
99
- if confidence < 100:
100
- name = known_names[label]
101
-
102
  # Format result text
103
  result_text = f"{name} is feeling {emotion}"
104
 
@@ -115,8 +104,8 @@ def process_frame(frame):
115
  print(f"Data inserted into MongoDB: {document}")
116
 
117
  # Draw bounding box and label
118
- cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
119
- cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
120
 
121
  return frame, result_text
122
 
 
5
  import os
6
  from keras.models import load_model
7
  from PIL import Image
8
+ import face_recognition
9
  import pymongo
10
  from datetime import datetime
11
  import tempfile
 
42
  # Load known faces and names
43
  known_faces = []
44
  known_names = []
 
45
 
46
def load_known_faces():
    """Populate the module-level ``known_faces`` / ``known_names`` lists.

    Scans the ``known_faces`` folder for image files, computes one
    face-recognition encoding per image, and records the file's base name
    (extension stripped) as the person's name. Images in which no face is
    detected are skipped, so the two lists always stay index-aligned.

    Side effects:
        Appends to the module-level ``known_faces`` and ``known_names``.
    """
    folder_path = "known_faces"  # Folder containing known face images
    for image_name in os.listdir(folder_path):
        # Case-insensitive extension check so files like "PHOTO.JPG"
        # are not silently skipped.
        if not image_name.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue

        image_path = os.path.join(folder_path, image_name)
        image = face_recognition.load_image_file(image_path)
        encodings = face_recognition.face_encodings(image)

        if encodings:  # Ensure an encoding was found before recording anything
            # Store the first encoding (assumes one face per image).
            known_faces.append(encodings[0])
            # splitext keeps interior dots ("John.Smith.jpg" -> "John.Smith"),
            # unlike split('.')[0], which would truncate the name to "John".
            known_names.append(os.path.splitext(image_name)[0])
57
 
 
 
 
58
  load_known_faces()
59
 
 
 
 
60
  # Process a single frame
61
  def process_frame(frame):
62
+ # Convert the image to RGB for face_recognition
63
+ rgb_frame = frame[:, :, ::-1]
64
+
65
+ # Detect faces
66
+ face_locations = face_recognition.face_locations(rgb_frame)
67
+ face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
68
+
69
  result_text = "" # Initialize result text
70
 
71
+ if face_encodings:
72
+ for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
73
+ # Check if the detected face matches any known faces
74
+ matches = face_recognition.compare_faces(known_faces, face_encoding)
75
+ name = "Unknown"
76
+
77
+ # Find the name of the recognized face
78
+ if True in matches:
79
+ first_match_index = matches.index(True)
80
+ name = known_names[first_match_index]
81
+
82
+ # Emotion detection
83
+ face_roi = frame[top:bottom, left:right]
84
  face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
85
+ face_roi = cv2.resize(face_roi, (48, 48))
86
  face_roi = np.expand_dims(face_roi, axis=0) / 255.0 # Normalize
87
 
 
88
  predictions = model.predict(face_roi)
89
  emotion = emotion_labels[np.argmax(predictions[0])]
90
 
 
 
 
 
 
 
91
  # Format result text
92
  result_text = f"{name} is feeling {emotion}"
93
 
 
104
  print(f"Data inserted into MongoDB: {document}")
105
 
106
  # Draw bounding box and label
107
+ cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
108
+ cv2.putText(frame, result_text, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
109
 
110
  return frame, result_text
111