LovnishVerma committed on
Commit 4ebde4b · verified · 1 Parent(s): 89d4bd2

Update app.py

Files changed (1)
  1. app.py +69 -172
app.py CHANGED
@@ -1,177 +1,74 @@
- import streamlit as st
  import cv2
  import numpy as np
- import time
- import os
- import requests
- from keras.models import load_model
- from PIL import Image
  import face_recognition
- import pymongo
- from datetime import datetime
- import tempfile
-
- # MongoDB Atlas Connection String
- MONGO_URI = "mongodb+srv://test:[email protected]/?retryWrites=true&w=majority"
-
- # Connect to MongoDB
- client = pymongo.MongoClient(MONGO_URI)
- db = client.get_database("emotion_detection")
- collection = db.get_collection("face_data")
-
- # Larger title
- st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
-
- # Smaller subtitle
- st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
-
- # Start time for measuring performance
- start = time.time()
-
- # Load the emotion detection model
- @st.cache_resource
- def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
-     return model
-
- model = load_emotion_model()
- st.write("Time taken to load model: ", time.time() - start)
-
- # Emotion labels
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
-
- # Load known faces and names
- known_faces = []
- known_names = []
-
- def load_known_faces():
-     folder_path = "known_faces"  # Folder containing known face images
-     for image_name in os.listdir(folder_path):
-         if image_name.endswith(('.jpg', '.jpeg', '.png')):
-             image_path = os.path.join(folder_path, image_name)
-             image = face_recognition.load_image_file(image_path)
-             encoding = face_recognition.face_encodings(image)
-
-             if encoding:  # Ensure encoding was found
-                 known_faces.append(encoding[0])  # Store the first encoding (assumes 1 face per image)
-                 known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name
-
- load_known_faces()
-
- # Process a single frame
- def process_frame(frame):
-     # Convert the image to RGB for face_recognition
-     rgb_frame = frame[:, :, ::-1]
-
-     # Detect faces
-     face_locations = face_recognition.face_locations(rgb_frame)
-     face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
-
-     result_text = ""  # Initialize result text
-
-     if face_encodings:
-         for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
-             # Check if the detected face matches any known faces
-             matches = face_recognition.compare_faces(known_faces, face_encoding)
-             name = "Unknown"
-
-             # Find the name of the recognized face
-             if True in matches:
-                 first_match_index = matches.index(True)
-                 name = known_names[first_match_index]
-
-             # Emotion detection
-             face_roi = frame[top:bottom, left:right]
-             face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)
-             face_roi = cv2.resize(face_roi, (48, 48))
-             face_roi = np.expand_dims(face_roi, axis=0) / 255.0  # Normalize
-
-             predictions = model.predict(face_roi)
-             emotion = emotion_labels[np.argmax(predictions[0])]
-
-             # Format result text
-             result_text = f"{name} is feeling {emotion}"
-
-             # Save data to MongoDB if face is recognized (name != Unknown)
-             if name != "Unknown":
-                 timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-                 document = {
-                     "name": name,
-                     "emotion": emotion,
-                     "timestamp": timestamp
-                 }
-                 # Insert the data into MongoDB
-                 collection.insert_one(document)
-                 print(f"Data inserted into MongoDB: {document}")
-
-             # Draw bounding box and label
-             cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
-             cv2.putText(frame, result_text, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
-
-     return frame, result_text
-
- # Video feed display
- def video_feed(video_source):
-     frame_placeholder = st.empty()  # Placeholder for displaying video frames
-     text_placeholder = st.empty()  # Placeholder for displaying result text
-
-     while True:
-         ret, frame = video_source.read()
-         if not ret:
-             break
-
-         frame, result_text = process_frame(frame)
-
-         # Display frame and result text
-         frame_placeholder.image(frame, channels="BGR", use_column_width=True)
-         text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
- # Sidebar for user input source selection
- upload_choice = st.sidebar.radio("Choose Input Source", ["Upload Image", "Upload Video", "Camera"])
-
- if upload_choice == "Camera":
-     image = st.camera_input("Take a picture")
-
-     if image:
-         frame = np.array(Image.open(image))
-         frame, result_text = process_frame(frame)
-         st.image(frame, caption='Processed Image', use_column_width=True)
-         st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
- elif upload_choice == "Upload Image":
-     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
-
-     if uploaded_image:
-         image = Image.open(uploaded_image)
-         frame = np.array(image)
-         frame, result_text = process_frame(frame)
-         st.image(frame, caption='Processed Image', use_column_width=True)
-         st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
- elif upload_choice == "Upload Video":
-     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
-
-     if uploaded_video:
-         with tempfile.NamedTemporaryFile(delete=False) as tfile:
-             tfile.write(uploaded_video.read())
-             video_source = cv2.VideoCapture(tfile.name)
-             video_feed(video_source)
-
- # Display the records stored in MongoDB with latest records on top
- st.markdown("### MongoDB Records")
- records = collection.find().sort("timestamp", -1)  # Sort records by timestamp in descending order
 
- for record in records:
-     col1, col2, col3 = st.columns([3, 3, 1])
-     with col1:
-         st.write(f"**Name**: {record['name']}")
-     with col2:
-         st.write(f"**Emotion**: {record['emotion']}")
-     with col3:
-         st.write(f"**Timestamp**: {record['timestamp']}")
-
-     # Delete record button
-     delete_button = st.button(f"Delete {record['_id']}", key=record['_id'])
-     if delete_button:
-         collection.delete_one({"_id": record["_id"]})
-         st.success(f"Record with ID {record['_id']} has been deleted.")
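A note on the removed code rather than on the commit itself: the deleted version hardcodes a MongoDB connection string, credentials included, directly in app.py. A common alternative, sketched here under the assumption that a `MONGO_URI` environment variable (or Hugging Face Space secret) is configured, is to read it at startup:

```python
import os
import pymongo

# Sketch: pull the connection string from the environment instead of
# hardcoding credentials in source; fail fast if it is missing.
MONGO_URI = os.environ.get("MONGO_URI")
if not MONGO_URI:
    raise RuntimeError("MONGO_URI environment variable is not set")

client = pymongo.MongoClient(MONGO_URI)
db = client.get_database("emotion_detection")
collection = db.get_collection("face_data")
```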
+ import os
  import cv2
  import numpy as np
  import face_recognition
+ import streamlit as st
+ from PIL import Image
+
+ # Lists for storing images and class names
+ Images = []
+ classnames = []
+
+ # Directory where known faces are stored
+ directory = "known_faces"  # Directory containing images of known faces
+
+ # Load images and class names from the directory
+ myList = os.listdir(directory)
+
+ for cls in myList:
+     if os.path.splitext(cls)[1] in [".jpg", ".jpeg"]:
+         img_path = os.path.join(directory, cls)
+         curImg = cv2.imread(img_path)
+         Images.append(curImg)
+         classnames.append(os.path.splitext(cls)[0])
+
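Two small caveats in the loading loop above: the extension filter no longer accepts the `.png` files the previous version allowed, and `cv2.imread` returns `None` rather than raising when a file cannot be decoded. A hedged variant of the same loop covering both cases:

```python
# Sketch: also accept .png, and skip any file OpenCV cannot decode
# (cv2.imread returns None on failure instead of raising).
for cls in os.listdir(directory):
    if os.path.splitext(cls)[1].lower() in [".jpg", ".jpeg", ".png"]:
        curImg = cv2.imread(os.path.join(directory, cls))
        if curImg is not None:
            Images.append(curImg)
            classnames.append(os.path.splitext(cls)[0])
```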
+ # Function to find face encodings
+ def findEncodings(Images):
+     encodeList = []
+     for img in Images:
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         encode = face_recognition.face_encodings(img)[0]
+         encodeList.append(encode)
+     return encodeList
+
+ # Find encodings for known faces
+ encodeListknown = findEncodings(Images)
+
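`face_recognition.face_encodings(img)[0]` raises an IndexError as soon as one image in known_faces/ contains no detectable face; the version being removed guarded against exactly this. A defensive sketch that also keeps names aligned with encodings (the changed signature is an assumption for illustration, not part of the commit):

```python
# Sketch: skip images with no detectable face, keeping the name list
# aligned with the encoding list so matchIndex still maps correctly.
def findEncodings(images, names):
    encodeList, keptNames = [], []
    for img, name in zip(images, names):
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encodings = face_recognition.face_encodings(rgb)
        if encodings:  # empty list means no face was found
            encodeList.append(encodings[0])
            keptNames.append(name)
    return encodeList, keptNames

encodeListknown, classnames = findEncodings(Images, classnames)
```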
+ # Streamlit UI for capturing image using the camera
+ img_file_buffer = st.camera_input("Take a picture")
+
+ if img_file_buffer is not None:
+     # Open the captured image using PIL
+     test_image = Image.open(img_file_buffer)
+     image = np.asarray(test_image)
+
+     # Resize and convert image for face recognition
+     imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)  # Resize for faster processing
+     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
 
+     # Find faces in the current frame
+     facesCurFrame = face_recognition.face_locations(imgS)
+     encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
+
+     name = "Unknown"  # Default name for unknown faces
+
+     if len(encodesCurFrame) > 0:
+         for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
+             # Compare the face with known faces
+             matches = face_recognition.compare_faces(encodeListknown, encodeFace)
+             faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
+             matchIndex = np.argmin(faceDis)  # Find the closest match
+
+             if matches[matchIndex]:
+                 name = classnames[matchIndex].upper()  # Assign name if match is found
+
+             # Draw a rectangle around the face and label it with the name
+             y1, x2, y2, x1 = faceLoc
+             y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4  # Scale coordinates back
+             cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+             cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
+             cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+
+     # Display the image with the bounding box and label
+     st.image(image, caption='Processed Image', use_column_width=True)
+     st.markdown(f"<h3 style='text-align: center;'>{name} is recognized</h3>", unsafe_allow_html=True)
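One behavioral detail worth flagging: `st.camera_input` decoded via `PIL.Image.open` yields an RGB array, so the `cv2.COLOR_BGR2RGB` call above actually swaps the frame into BGR before it reaches face_recognition, which expects RGB. Detection often still works, but recognition accuracy can suffer. A minimal adjustment, under that RGB assumption, is to drop the swap:

```python
# Sketch: the PIL-decoded frame is already RGB, so resize only;
# no channel conversion is needed before face_recognition.
imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
facesCurFrame = face_recognition.face_locations(imgS)
encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
```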