LovnishVerma committed
Commit 6361d4f · verified · Parent: 3f84249

Update app.py
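This change swaps the OpenCV LBPH face recognizer for FaceNet embeddings: faces are detected with MTCNN, embedded with facenet_pytorch's InceptionResnetV1 (pretrained on VGGFace2), and matched to known faces by L2 distance under a 0.6 threshold (a small sketch of the matching rule follows the diff). It also fixes the ROI slicing: the old code unpacked MTCNN's (x1, y1, x2, y2) boxes as (x, y, w, h). Startup timing prints and the per-record Delete buttons are removed, and the records view is simplified to three columns.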

Files changed (1): app.py (+75, -105)
app.py CHANGED
@@ -1,111 +1,103 @@
  import streamlit as st
  import cv2
  import numpy as np
- import time
- import os
  from keras.models import load_model
  from PIL import Image
  import pymongo
- from datetime import datetime
  import tempfile
- from facenet_pytorch import MTCNN

  # MongoDB Atlas Connection String
  MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
-
- # Connect to MongoDB
  client = pymongo.MongoClient(MONGO_URI)
  db = client.get_database("emotion_detection")
  collection = db.get_collection("face_data")

- # Larger title
  st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
-
- # Smaller subtitle
  st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

- # Start time for measuring performance
- start = time.time()
-
  # Load the emotion detection model
  @st.cache_resource
  def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
      return model

- model = load_emotion_model()
- print("Time taken to load model: ", time.time() - start)
-
- # Emotion labels
  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

- # Initialize MTCNN for face detection
  mtcnn = MTCNN()

- # Load known faces and names
  known_faces = []
  known_names = []
- face_recognizer = cv2.face.LBPHFaceRecognizer_create()

  def load_known_faces():
      folder_path = "known_faces"  # Folder containing known face images
      for image_name in os.listdir(folder_path):
-         if image_name.endswith(('.jpg', '.jpeg', '.png')):
              image_path = os.path.join(folder_path, image_name)
-             image = cv2.imread(image_path)
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-             # Detect face in the image using MTCNN
-             faces, _ = mtcnn.detect(image)
-
-             if faces is not None:
-                 for face in faces:
-                     # Convert bounding box coordinates to integers
-                     x, y, w, h = map(int, face)
-                     roi_gray = gray[y:y+h, x:x+w]
-                     known_faces.append(roi_gray)
-                     known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name
-
-     # Train the recognizer with the known faces
-     if known_faces:  # Only train if we have faces
-         face_recognizer.train(known_faces, np.array([i for i in range(len(known_faces))]))

  load_known_faces()

- # Process a single frame
- def process_frame(frame):
-     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-     faces, _ = mtcnn.detect(frame)
-
-     result_text = ""  # Initialize result text
-
-     if faces is not None and len(faces) > 0:
-         for face in faces:
-             # Convert bounding box coordinates to integers
-             x, y, w, h = map(int, face)
-             roi_color = frame[y:y+h, x:x+w]
-             roi_gray = gray[y:y+h, x:x+w]
-
-             # Apply histogram equalization for better feature extraction
-             roi_gray = cv2.equalizeHist(roi_gray)

-             face_roi = cv2.resize(roi_color, (48, 48))
-             face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
-             face_roi = np.expand_dims(face_roi, axis=0) / 255.0  # Normalize
-
-             # Emotion detection
-             predictions = model.predict(face_roi)
              emotion = emotion_labels[np.argmax(predictions[0])]

              # Face recognition
-             name = "Unknown"
-             label, confidence = face_recognizer.predict(roi_gray)
-             if confidence < 100:
-                 name = known_names[label]

-             # Format result text
-             result_text = f"{name} is feeling {emotion}"

-             # Save data to MongoDB if face is recognized (name != Unknown)
              if name != "Unknown":
                  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                  document = {
@@ -113,45 +105,35 @@ def process_frame(frame):
                      "emotion": emotion,
                      "timestamp": timestamp
                  }
-
-                 # Check if the same record already exists (to prevent duplicates)
-                 existing_record = collection.find_one({"name": name, "timestamp": timestamp})
-                 if not existing_record:
-                     # Insert the data into MongoDB
                      collection.insert_one(document)
-                     print(f"Data inserted into MongoDB: {document}")
-
-             # Draw bounding box and label
-             cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-             cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

      else:
          result_text = "No face detected!"

      return frame, result_text

- # Video feed display
  def video_feed(video_source):
-     frame_placeholder = st.empty()  # Placeholder for displaying video frames
-     text_placeholder = st.empty()  # Placeholder for displaying result text

      while True:
          ret, frame = video_source.read()
          if not ret:
              break
-
          frame, result_text = process_frame(frame)
-
-         # Display frame and result text
          frame_placeholder.image(frame, channels="BGR", use_column_width=True)
          text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

- # Sidebar for user input source selection
  upload_choice = st.sidebar.radio("Choose Input Source", ["Upload Image", "Upload Video", "Camera"])

  if upload_choice == "Camera":
      image = st.camera_input("Take a picture")
-
      if image:
          frame = np.array(Image.open(image))
          frame, result_text = process_frame(frame)
@@ -159,8 +141,7 @@ if upload_choice == "Camera":
          st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

  elif upload_choice == "Upload Image":
-     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
-
      if uploaded_image:
          image = Image.open(uploaded_image)
          frame = np.array(image)
@@ -169,29 +150,18 @@ elif upload_choice == "Upload Image":
          st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

  elif upload_choice == "Upload Video":
-     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
-
      if uploaded_video:
          with tempfile.NamedTemporaryFile(delete=False) as tfile:
              tfile.write(uploaded_video.read())
          video_source = cv2.VideoCapture(tfile.name)
          video_feed(video_source)

- # Display the latest 5 records stored in MongoDB with timestamp and proper alignment
- st.markdown("### MongoDB Records")
- records = collection.find().sort("timestamp", -1).limit(5)  # Limit to 5 records
-
  for record in records:
-     col1, col2, col3 = st.columns([3, 3, 2])
-     with col1:
-         st.write(f"**Name**: {record['name']}")
-     with col2:
-         st.write(f"**Emotion**: {record['emotion']}")
-     with col3:
-         st.write(f"**Timestamp**: {record['timestamp']}")
-
-     # Delete record
-     delete_button = st.button("Delete", key=f"delete_{record['_id']}")
-     if delete_button:
-         collection.delete_one({"_id": record["_id"]})
-         st.success(f"Record with ID {record['_id']} has been deleted.")
 
app.py after this commit (added lines marked with +):

  import streamlit as st
  import cv2
  import numpy as np
+ from datetime import datetime
+ import torch
+ from facenet_pytorch import MTCNN, InceptionResnetV1
  from keras.models import load_model
  from PIL import Image
  import pymongo
+ import os
  import tempfile

  # MongoDB Atlas Connection String
  MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
  client = pymongo.MongoClient(MONGO_URI)
  db = client.get_database("emotion_detection")
  collection = db.get_collection("face_data")

+ # Page title
  st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
  st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)

  # Load the emotion detection model
  @st.cache_resource
  def load_emotion_model():
+     model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is present
      return model

+ emotion_model = load_emotion_model()

  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

+ # Initialize FaceNet model and MTCNN
+ facenet = InceptionResnetV1(pretrained='vggface2').eval()
  mtcnn = MTCNN()

+ # Known faces and embeddings
  known_faces = []
  known_names = []

  def load_known_faces():
      folder_path = "known_faces"  # Folder containing known face images
      for image_name in os.listdir(folder_path):
+         if image_name.endswith(('.jpg', '.jpeg', '.png')):
              image_path = os.path.join(folder_path, image_name)
+             image = Image.open(image_path).convert("RGB")
+             face, _ = mtcnn.detect(image)
+
+             if face is not None:
+                 face_box = face[0].astype(int)
+                 cropped_face = image.crop((face_box[0], face_box[1], face_box[2], face_box[3]))
+                 cropped_face = cropped_face.resize((160, 160))
+                 face_tensor = np.array(cropped_face).transpose(2, 0, 1) / 255.0
+                 face_tensor = torch.tensor(face_tensor, dtype=torch.float32).unsqueeze(0)
+
+                 with torch.no_grad():
+                     embedding = facenet(face_tensor).numpy()
+
+                 known_faces.append(embedding)
+                 known_names.append(image_name.split('.')[0])

  load_known_faces()

+ def recognize_face(embedding):
+     min_distance = float('inf')
+     name = "Unknown"
+     for idx, known_embedding in enumerate(known_faces):
+         distance = np.linalg.norm(known_embedding - embedding)
+         if distance < min_distance and distance < 0.6:  # Threshold for similarity
+             min_distance = distance
+             name = known_names[idx]
+     return name

+ def process_frame(frame):
+     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     faces, _ = mtcnn.detect(frame_rgb)
+     result_text = ""
+
+     if faces is not None:
+         for face_box in faces:
+             x1, y1, x2, y2 = map(int, face_box)
+             cropped_face = frame_rgb[y1:y2, x1:x2]
+             resized_face = cv2.resize(cropped_face, (48, 48))
+             face_normalized = resized_face / 255.0
+             face_array = np.expand_dims(face_normalized, axis=0)
+
+             # Emotion prediction
+             predictions = emotion_model.predict(face_array)
              emotion = emotion_labels[np.argmax(predictions[0])]

              # Face recognition
+             cropped_face_for_recognition = cv2.resize(cropped_face, (160, 160))
+             face_tensor = np.array(cropped_face_for_recognition).transpose(2, 0, 1) / 255.0
+             face_tensor = torch.tensor(face_tensor, dtype=torch.float32).unsqueeze(0)
+
+             with torch.no_grad():
+                 face_embedding = facenet(face_tensor).numpy()
+
+             name = recognize_face(face_embedding)

+             # Save record in MongoDB
              if name != "Unknown":
                  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                  document = {
@@ -113,45 +105,35 @@ def process_frame(frame):
                      "emotion": emotion,
                      "timestamp": timestamp
                  }
+                 if not collection.find_one({"name": name, "timestamp": timestamp}):
                      collection.insert_one(document)

+             # Display result
+             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+             result_text = f"{name} is feeling {emotion}"
+             cv2.putText(frame, result_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
      else:
          result_text = "No face detected!"

      return frame, result_text

  def video_feed(video_source):
+     frame_placeholder = st.empty()
+     text_placeholder = st.empty()

      while True:
          ret, frame = video_source.read()
          if not ret:
              break
          frame, result_text = process_frame(frame)
          frame_placeholder.image(frame, channels="BGR", use_column_width=True)
          text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

+ # Sidebar for input source
  upload_choice = st.sidebar.radio("Choose Input Source", ["Upload Image", "Upload Video", "Camera"])

  if upload_choice == "Camera":
      image = st.camera_input("Take a picture")
      if image:
          frame = np.array(Image.open(image))
          frame, result_text = process_frame(frame)
@@ -159,8 +141,7 @@ if upload_choice == "Camera":
          st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

  elif upload_choice == "Upload Image":
+     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
      if uploaded_image:
          image = Image.open(uploaded_image)
          frame = np.array(image)
@@ -169,29 +150,18 @@ elif upload_choice == "Upload Image":
          st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

  elif upload_choice == "Upload Video":
+     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])
      if uploaded_video:
          with tempfile.NamedTemporaryFile(delete=False) as tfile:
              tfile.write(uploaded_video.read())
          video_source = cv2.VideoCapture(tfile.name)
          video_feed(video_source)

+ # Display recent MongoDB records
+ st.markdown("### Recent Records")
+ records = collection.find().sort("timestamp", -1).limit(5)

  for record in records:
+     col1, col2, col3 = st.columns(3)
+     col1.write(f"**Name**: {record['name']}")
+     col2.write(f"**Emotion**: {record['emotion']}")
+     col3.write(f"**Timestamp**: {record['timestamp']}")
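
The core of the new recognition path is nearest-neighbor matching over FaceNet embeddings, as implemented in recognize_face() above. Below is a minimal, self-contained sketch of that matching rule. The name match_embedding and the toy vectors are illustrative stand-ins, not part of the commit; in app.py the embeddings come from InceptionResnetV1(pretrained='vggface2') applied to 160x160 RGB face crops.

import numpy as np

def match_embedding(embedding, known_embeddings, known_names, threshold=0.6):
    # Nearest known embedding wins, but only if its L2 distance also
    # clears the 0.6 similarity threshold used by recognize_face().
    name, best = "Unknown", float("inf")
    for known, candidate in zip(known_embeddings, known_names):
        distance = np.linalg.norm(known - embedding)
        if distance < best and distance < threshold:
            best, name = distance, candidate
    return name

# Toy usage with 512-dimensional vectors (InceptionResnetV1's embedding size).
rng = np.random.default_rng(0)
alice = rng.normal(size=512)
alice /= np.linalg.norm(alice)                      # unit norm (FaceNet embeddings are L2-normalized)
probe = alice + rng.normal(scale=0.01, size=512)    # near-duplicate of alice
print(match_embedding(probe, [alice], ["alice"]))                 # -> alice
print(match_embedding(rng.normal(size=512), [alice], ["alice"]))  # -> Unknown

For unit-norm embeddings, an L2 cutoff of 0.6 corresponds to a cosine similarity of about 1 - 0.6**2 / 2 = 0.82, so only fairly close matches are accepted; anything farther falls through to "Unknown".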