LovnishVerma committed on
Commit f837deb · verified · 1 Parent(s): 060878a

Update app.py

Files changed (1)
  1. app.py +176 -69
app.py CHANGED
@@ -1,74 +1,181 @@
- import os
  import cv2
  import numpy as np
- import face_recognition
- import streamlit as st
  from PIL import Image

- # List for storing images and class names
- Images = []
- classnames = []
-
- # Directory where known faces are stored
- directory = "known_faces"  # Directory containing images of known faces
-
- # Load images and classnames from the directory
- myList = os.listdir(directory)
-
- for cls in myList:
-     if os.path.splitext(cls)[1] in [".jpg", ".jpeg"]:
-         img_path = os.path.join(directory, cls)
-         curImg = cv2.imread(img_path)
-         Images.append(curImg)
-         classnames.append(os.path.splitext(cls)[0])
-
- # Function to find face encodings
- def findEncodings(Images):
-     encodeList = []
-     for img in Images:
-         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-         encode = face_recognition.face_encodings(img)[0]
-         encodeList.append(encode)
-     return encodeList
-
- # Find encodings for known faces
- encodeListknown = findEncodings(Images)
-
- # Streamlit UI for capturing image using the camera
- img_file_buffer = st.camera_input("Take a picture")
-
- if img_file_buffer is not None:
-     # Open the captured image using PIL
-     test_image = Image.open(img_file_buffer)
-     image = np.asarray(test_image)
-
-     # Resize and convert image for face recognition
-     imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)  # Resize for faster processing
-     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

-     # Find faces in the current frame
-     facesCurFrame = face_recognition.face_locations(imgS)
-     encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
-
-     name = "Unknown"  # Default name for unknown faces
-
-     if len(encodesCurFrame) > 0:
-         for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
-             # Compare the face with known faces
-             matches = face_recognition.compare_faces(encodeListknown, encodeFace)
-             faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
-             matchIndex = np.argmin(faceDis)  # Find the closest match
-
-             if matches[matchIndex]:
-                 name = classnames[matchIndex].upper()  # Assign name if match is found
-
-             # Draw a rectangle around the face and label it with the name
-             y1, x2, y2, x1 = faceLoc
-             y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4  # Scale coordinates back
-             cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
-             cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
-             cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
-
-     # Display the image with the bounding box and label
-     st.image(image, caption='Processed Image', use_column_width=True)
-     st.markdown(f"<h3 style='text-align: center;'>{name} is recognized</h3>", unsafe_allow_html=True)
+ import streamlit as st
  import cv2
  import numpy as np
+ import time
+ import os
+ import torch  # needed to run the facenet_pytorch model below
+ from keras.models import load_model
  from PIL import Image
+ from mtcnn import MTCNN  # MTCNN for better face detection
+ from keras.preprocessing import image
+ from tensorflow.keras.applications.inception_v3 import preprocess_input
+ import pymongo
+ from datetime import datetime
+ import tempfile
+ from facenet_pytorch import InceptionResnetV1  # FaceNet model for face recognition (importing MTCNN from facenet_pytorch as well would shadow the mtcnn detector used below)
+
+ # MongoDB Atlas Connection String
+ MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
+
+ # Connect to MongoDB
+ client = pymongo.MongoClient(MONGO_URI)
+ db = client.get_database("emotion_detection")
+ collection = db.get_collection("face_data")
+
+ # Larger title
+ st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
+
+ # Smaller subtitle
+ st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
+
+ # Start time for measuring performance
+ start = time.time()
+
+ # Load the emotion detection model
+ @st.cache_resource
+ def load_emotion_model():
+     model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
+     return model
+
+ model = load_emotion_model()
+ print("Time taken to load model: ", time.time() - start)
+
+ # Emotion labels
+ emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+
+ # Load FaceNet Model for Face Recognition
+ @st.cache_resource
+ def load_facenet_model():
+     # Load FaceNet model for face recognition
+     facenet_model = InceptionResnetV1(pretrained='vggface2').eval()
+     return facenet_model
+
+ facenet_model = load_facenet_model()
+
+ # MTCNN for face detection
+ detector = MTCNN()
+
+ # Process a single frame
+ def process_frame(frame):
+     # Detect faces
+     faces = detector.detect_faces(frame)
+
+     result_text = ""  # Initialize result text
+
+     if len(faces) > 0:
+         for face in faces:
+             x, y, w, h = face['box']
+             roi_color = frame[y:y+h, x:x+w]
+
+             # Apply histogram equalization for better feature extraction
+             roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_BGR2GRAY)
+             roi_gray = cv2.equalizeHist(roi_gray)
+
+             # Emotion detection
+             face_roi = cv2.resize(roi_color, (48, 48))
+             face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
+             face_roi = np.expand_dims(face_roi, axis=0) / 255.0  # Normalize
+
+             predictions = model.predict(face_roi)
+             emotion = emotion_labels[np.argmax(predictions[0])]
+
+             # Extract an embedding for face recognition using FaceNet
+             # (facenet_pytorch expects a standardized 160x160 RGB face crop
+             #  as a torch tensor, not keras-style preprocess_input on keypoints)
+             face_crop = cv2.resize(roi_color, (160, 160))
+             face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB)
+             face_tensor = torch.tensor(face_crop, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
+             face_tensor = (face_tensor - 127.5) / 128.0  # fixed image standardization
+             with torch.no_grad():
+                 face_embedding = facenet_model(face_tensor).numpy()
+
+             # Compare face embeddings with known faces (replace with your own database)
+             known_faces = []  # Load known face embeddings from database
+             known_names = []  # Corresponding names
+
+             name = "Unknown"
+             min_distance = float('inf')
+
+             for i, known_face in enumerate(known_faces):
+                 distance = np.linalg.norm(face_embedding - known_face)
+                 if distance < min_distance:
+                     min_distance = distance
+                     name = known_names[i]
+
+             # Save data to MongoDB if face is recognized (name != Unknown)
+             if name != "Unknown":
+                 timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                 document = {
+                     "name": name,
+                     "emotion": emotion,
+                     "timestamp": timestamp
+                 }
+                 # Insert the data into MongoDB
+                 collection.insert_one(document)
+                 print(f"Data inserted into MongoDB: {document}")
+
+             result_text = f"{name} is feeling {emotion}"
+
+             # Draw bounding box and label
+             cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+             cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+     return frame, result_text
+
+ # Video feed display
+ def video_feed(video_source):
+     frame_placeholder = st.empty()  # Placeholder for displaying video frames
+     text_placeholder = st.empty()   # Placeholder for displaying result text
+
+     while True:
+         ret, frame = video_source.read()
+         if not ret:
+             break
+
+         frame, result_text = process_frame(frame)
+
+         # Display frame and result text
+         frame_placeholder.image(frame, channels="BGR", use_column_width=True)
+         text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
+
+ # Sidebar for user input source selection
+ upload_choice = st.sidebar.radio("Choose Input Source", ["Upload Image", "Upload Video", "Camera"])
+
+ if upload_choice == "Camera":
+     image = st.camera_input("Take a picture")
+
+     if image:
+         frame = np.array(Image.open(image))
+         frame, result_text = process_frame(frame)
+         st.image(frame, caption='Processed Image', use_column_width=True)
+         st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
+
+ elif upload_choice == "Upload Image":
+     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
+
+     if uploaded_image:
+         image = Image.open(uploaded_image)
+         frame = np.array(image)
+         frame, result_text = process_frame(frame)
+         st.image(frame, caption='Processed Image', use_column_width=True)
+         st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
+
+ elif upload_choice == "Upload Video":
+     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
+
+     if uploaded_video:
+         with tempfile.NamedTemporaryFile(delete=False) as tfile:
+             tfile.write(uploaded_video.read())
+         video_source = cv2.VideoCapture(tfile.name)
+         video_feed(video_source)
+
+ # Display the records stored in MongoDB with latest records on top
+ st.markdown("### MongoDB Records")
+ records = collection.find().sort("timestamp", -1)  # Sort records by timestamp in descending order

+ for record in records:
+     col1, col2, col3 = st.columns([3, 3, 1])
+     with col1:
+         st.write(f"**Name**: {record['name']}")
+     with col2:
+         st.write(f"**Emotion**: {record['emotion']}")
+     with col3:
+         st.write(f"**Timestamp**: {record['timestamp']}")
+
+     # Delete record button
+     delete_button = st.button(f"Delete {record['_id']}", key=record['_id'])
+     if delete_button:
+         collection.delete_one({"_id": record["_id"]})
+         st.success(f"Record with ID {record['_id']} has been deleted.")
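
Note: in this version `known_faces` and `known_names` are still empty placeholders inside `process_frame`, so every detected face is reported as "Unknown" and nothing is written to MongoDB. Below is a minimal sketch of how those lists could be filled from the same database; the `known_faces` collection and its `name`/`embedding` fields are illustrative assumptions, not part of this commit.

# Hypothetical enrollment/lookup helpers (not part of this commit); they assume
# the `db` handle created in app.py and a "known_faces" collection whose documents
# look like {"name": "alice", "embedding": [ ...floats from facenet_model... ]}.
import numpy as np

known_collection = db.get_collection("known_faces")

def enroll_face(name, face_embedding):
    """Store one FaceNet embedding under a person's name."""
    known_collection.insert_one({
        "name": name,
        "embedding": np.asarray(face_embedding, dtype=np.float32).ravel().tolist(),
    })

def load_known_faces():
    """Return (embeddings, names) for every enrolled person."""
    known_faces, known_names = [], []
    for doc in known_collection.find():
        known_faces.append(np.asarray(doc["embedding"], dtype=np.float32))
        known_names.append(doc["name"])
    return known_faces, known_names

With something along these lines, `process_frame` could call `load_known_faces()` once (for example behind `@st.cache_resource`) instead of rebuilding empty lists for every face, and a threshold on `min_distance` would stop far-away embeddings from being labelled as a match.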