LovnishVerma committed on
Commit
3ab75c1
·
verified ·
1 Parent(s): 277b9e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -34
app.py CHANGED
@@ -5,13 +5,10 @@ import time
5
  import os
6
  from keras.models import load_model
7
  from PIL import Image
8
- from mtcnn import MTCNN # MTCNN for better face detection
9
- from keras.preprocessing import image
10
- from tensorflow.keras.applications.inception_v3 import preprocess_input
11
  import pymongo
12
  from datetime import datetime
13
  import tempfile
14
- from facenet_pytorch import MTCNN, InceptionResnetV1 # FaceNet model for face recognition
15
 
16
  # MongoDB Atlas Connection String
17
  MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
@@ -42,57 +39,67 @@ print("Time taken to load model: ", time.time() - start)
42
  # Emotion labels
43
  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
44
 
45
- # Load FaceNet Model for Face Recognition
46
- @st.cache_resource
47
- def load_facenet_model():
48
- # Load FaceNet model for face recognition
49
- facenet_model = InceptionResnetV1(pretrained='vggface2').eval()
50
- return facenet_model
 
 
 
 
 
 
 
 
 
 
 
51
 
52
- facenet_model = load_facenet_model()
 
 
 
 
53
 
54
- # MTCNN for face detection
55
- detector = MTCNN()
 
 
56
 
57
  # Process a single frame
58
  def process_frame(frame):
59
- # Detect faces
60
- faces = detector.detect_faces(frame)
61
 
62
  result_text = "" # Initialize result text
63
 
64
- if len(faces) > 0:
65
  for face in faces:
66
- x, y, w, h = face['box']
67
  roi_color = frame[y:y+h, x:x+w]
 
68
 
69
  # Apply histogram equalization for better feature extraction
70
- roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_BGR2GRAY)
71
  roi_gray = cv2.equalizeHist(roi_gray)
72
 
73
- # Emotion detection
74
  face_roi = cv2.resize(roi_color, (48, 48))
75
  face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
76
  face_roi = np.expand_dims(face_roi, axis=0) / 255.0 # Normalize
77
 
 
78
  predictions = model.predict(face_roi)
79
  emotion = emotion_labels[np.argmax(predictions[0])]
80
 
81
- # Extract embedding for face recognition using FaceNet
82
- face_embedding = facenet_model.predict(preprocess_input(np.expand_dims(face['keypoints'], axis=0)))
83
-
84
- # Compare face embeddings with known faces (replace with your own database)
85
- known_faces = [] # Load known face embeddings from database
86
- known_names = [] # Corresponding names
87
-
88
  name = "Unknown"
89
- min_distance = float('inf')
 
 
90
 
91
- for i, known_face in enumerate(known_faces):
92
- distance = np.linalg.norm(face_embedding - known_face)
93
- if distance < min_distance:
94
- min_distance = distance
95
- name = known_names[i]
96
 
97
  # Save data to MongoDB if face is recognized (name != Unknown)
98
  if name != "Unknown":
@@ -106,8 +113,6 @@ def process_frame(frame):
106
  collection.insert_one(document)
107
  print(f"Data inserted into MongoDB: {document}")
108
 
109
- result_text = f"{name} is feeling {emotion}"
110
-
111
  # Draw bounding box and label
112
  cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
113
  cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
 
5
  import os
6
  from keras.models import load_model
7
  from PIL import Image
 
 
 
8
  import pymongo
9
  from datetime import datetime
10
  import tempfile
11
+ from facenet_pytorch import MTCNN
12
 
13
  # MongoDB Atlas Connection String
14
  MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
 
39
  # Emotion labels
40
  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
41
 
42
+ # Initialize MTCNN for face detection
43
+ mtcnn = MTCNN()
44
+
45
+ # Load known faces and names
46
+ known_faces = []
47
+ known_names = []
48
+ face_recognizer = cv2.face.LBPHFaceRecognizer_create()
49
+
50
def load_known_faces():
    """Scan the ``known_faces`` folder and train the LBPH face recognizer.

    For each image file, detect faces with MTCNN, crop the grayscale face
    region, and register it under the file's base name (the person's name).

    NOTE(review): facenet_pytorch's ``MTCNN.detect()`` returns ``(boxes, probs)``
    where each box is a float corner box ``[x1, y1, x2, y2]`` — or ``None`` when
    no face is found. The original code unpacked boxes as ``(x, y, w, h)``,
    which crops the wrong region, uses float slice indices, and crashes on
    face-free images; fixed below.
    """
    folder_path = "known_faces"  # Folder containing known face images
    for image_name in os.listdir(folder_path):
        if not image_name.endswith(('.jpg', '.jpeg', '.png')):
            continue
        image_path = os.path.join(folder_path, image_name)
        image = cv2.imread(image_path)
        if image is None:
            # Unreadable/corrupt file — skip instead of crashing in cvtColor
            continue
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Detect faces; detect() returns (boxes, probs)
        boxes = mtcnn.detect(image)[0]
        if boxes is None:
            # No face found in this image
            continue
        for box in boxes:
            # Convert float corner coordinates to integer crop bounds
            x1, y1, x2, y2 = (int(v) for v in box)
            roi_gray = gray[y1:y2, x1:x2]
            if roi_gray.size == 0:
                # Degenerate/out-of-frame box — nothing to train on
                continue
            known_faces.append(roi_gray)
            # File name (without extension) is the person's name
            known_names.append(image_name.split('.')[0])

    # Train with one unique integer label per registered face crop;
    # LBPH train() raises on an empty sample list, so guard it.
    if known_faces:
        face_recognizer.train(known_faces, np.arange(len(known_faces)))

load_known_faces()
70
 
71
  # Process a single frame
72
  def process_frame(frame):
73
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
74
+ faces = mtcnn.detect(frame)[0] # Use the correct detect method
75
 
76
  result_text = "" # Initialize result text
77
 
78
+ if faces is not None and len(faces) > 0:
79
  for face in faces:
80
+ x, y, w, h = face[0], face[1], face[2], face[3]
81
  roi_color = frame[y:y+h, x:x+w]
82
+ roi_gray = gray[y:y+h, x:x+w]
83
 
84
  # Apply histogram equalization for better feature extraction
 
85
  roi_gray = cv2.equalizeHist(roi_gray)
86
 
 
87
  face_roi = cv2.resize(roi_color, (48, 48))
88
  face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
89
  face_roi = np.expand_dims(face_roi, axis=0) / 255.0 # Normalize
90
 
91
+ # Emotion detection
92
  predictions = model.predict(face_roi)
93
  emotion = emotion_labels[np.argmax(predictions[0])]
94
 
95
+ # Face recognition
 
 
 
 
 
 
96
  name = "Unknown"
97
+ label, confidence = face_recognizer.predict(roi_gray)
98
+ if confidence < 100:
99
+ name = known_names[label]
100
 
101
+ # Format result text
102
+ result_text = f"{name} is feeling {emotion}"
 
 
 
103
 
104
  # Save data to MongoDB if face is recognized (name != Unknown)
105
  if name != "Unknown":
 
113
  collection.insert_one(document)
114
  print(f"Data inserted into MongoDB: {document}")
115
 
 
 
116
  # Draw bounding box and label
117
  cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
118
  cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)