LovnishVerma committed
Commit 123d6a8 · verified · 1 Parent(s): 74a8091

Update app.py

Files changed (1)
  1. app.py +40 -159

app.py CHANGED
@@ -1,19 +1,15 @@
  import streamlit as st
  import cv2
  import numpy as np
  from datetime import datetime
- import torch
- from facenet_pytorch import MTCNN, InceptionResnetV1
  from keras.models import load_model
- from PIL import Image
  import sqlite3
  import os
- import tempfile

- # SQLite Database
  DB_NAME = "emotion_detection.db"

- # Initialize SQLite Database
  def initialize_database():
      conn = sqlite3.connect(DB_NAME)
      cursor = conn.cursor()
@@ -30,173 +26,58 @@ def initialize_database():

  initialize_database()

- # Load the emotion detection model
  @st.cache_resource
  def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')  # Ensure the file exists
-     return model

  emotion_model = load_emotion_model()
  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

- # Initialize FaceNet model and MTCNN
- facenet = InceptionResnetV1(pretrained='vggface2').eval()
- mtcnn = MTCNN()
-
- # Directory for known faces
- KNOWN_FACES_DIR = "known_faces"
- if not os.path.exists(KNOWN_FACES_DIR):
-     os.makedirs(KNOWN_FACES_DIR)
-
- # Load known faces and embeddings
- known_faces = []
- known_names = []
-
- def load_known_faces():
-     for image_name in os.listdir(KNOWN_FACES_DIR):
-         if image_name.endswith(('.jpg', '.jpeg', '.png')):
-             image_path = os.path.join(KNOWN_FACES_DIR, image_name)
-             image = Image.open(image_path).convert("RGB")
-             face, _ = mtcnn.detect(image)
-
-             if face is not None:
-                 face_box = face[0].astype(int)
-                 cropped_face = image.crop((face_box[0], face_box[1], face_box[2], face_box[3]))
-                 cropped_face = cropped_face.resize((160, 160))
-                 face_tensor = np.array(cropped_face).transpose(2, 0, 1) / 255.0
-                 face_tensor = torch.tensor(face_tensor, dtype=torch.float32).unsqueeze(0)
-
-                 with torch.no_grad():
-                     embedding = facenet(face_tensor).numpy()
-
-                 known_faces.append(embedding)
-                 known_names.append(image_name.split('.')[0])
-
- load_known_faces()
-
- def recognize_face(embedding):
-     min_distance = float('inf')
-     name = "Unknown"
-     for idx, known_embedding in enumerate(known_faces):
-         distance = np.linalg.norm(known_embedding - embedding)
-         if distance < min_distance and distance < 0.6:  # Threshold
-             min_distance = distance
-             name = known_names[idx]
-     return name
-
- def process_frame(frame):
-     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-     faces, _ = mtcnn.detect(frame_rgb)
-     result_text = ""
-
-     if faces is not None:
-         for face_box in faces:
-             x1, y1, x2, y2 = map(int, face_box)
-             cropped_face = frame_rgb[y1:y2, x1:x2]
-             resized_face = cv2.resize(cropped_face, (48, 48))
-             face_normalized = resized_face / 255.0
-             face_array = np.expand_dims(face_normalized, axis=0)
-
-             # Emotion prediction
-             predictions = emotion_model.predict(face_array)
-             emotion = emotion_labels[np.argmax(predictions[0])]
-
-             # Face recognition
-             cropped_face_for_recognition = cv2.resize(cropped_face, (160, 160))
-             face_tensor = np.array(cropped_face_for_recognition).transpose(2, 0, 1) / 255.0
-             face_tensor = torch.tensor(face_tensor, dtype=torch.float32).unsqueeze(0)
-
-             with torch.no_grad():
-                 face_embedding = facenet(face_tensor).numpy()
-
-             name = recognize_face(face_embedding)
-
-             # Save record in SQLite
-             if name != "Unknown":
-                 timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-                 conn = sqlite3.connect(DB_NAME)
-                 cursor = conn.cursor()
-                 cursor.execute("""
-                     INSERT INTO face_data (name, emotion, timestamp)
-                     VALUES (?, ?, ?)
-                 """, (name, emotion, timestamp))
-                 conn.commit()
-                 conn.close()
-
-             # Display result
-             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-             result_text = f"{name} is feeling {emotion}"
-             cv2.putText(frame, result_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
-     else:
-         result_text = "No face detected!"
-
-     return frame, result_text

  # Sidebar menu
- menu = st.sidebar.selectbox("Menu", ["Home", "Register New Face", "View Records"])

  if menu == "Home":
-     st.title("Emotion Detection")
-     st.write("Choose input source to start detection.")
-     upload_choice = st.radio("Choose Input Source", ["Camera", "Upload Image", "Upload Video"])
-
-     if upload_choice == "Camera":
-         cap = cv2.VideoCapture(0)
-         if not cap.isOpened():
-             st.error("Unable to access the camera.")
-         else:
-             while True:
-                 ret, frame = cap.read()
-                 if not ret:
-                     break
-                 frame, result_text = process_frame(frame)
-                 st.image(frame, channels="BGR")
-                 st.write(result_text)
-             cap.release()
-
-     elif upload_choice == "Upload Image":
-         uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
-         if uploaded_image:
-             image = Image.open(uploaded_image)
-             frame = np.array(image)
-             frame, result_text = process_frame(frame)
-             st.image(frame)
-             st.write(result_text)
-
-     elif upload_choice == "Upload Video":
-         uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])
-         if uploaded_video:
-             with tempfile.NamedTemporaryFile(delete=False) as temp_file:
-                 temp_file.write(uploaded_video.read())
-                 video_source = cv2.VideoCapture(temp_file.name)
-                 while video_source.isOpened():
-                     ret, frame = video_source.read()
-                     if not ret:
-                         break
-                     frame, result_text = process_frame(frame)
-                     st.image(frame, channels="BGR")
-                     st.write(result_text)
-                 video_source.release()
-
- elif menu == "Register New Face":
-     st.title("Register New Face")
-     name = st.text_input("Enter Name")
-     if st.button("Capture Image"):
-         cap = cv2.VideoCapture(0)
-         if not cap.isOpened():
-             st.error("Unable to access the camera.")
-         else:
-             ret, frame = cap.read()
-             if ret:
-                 image_path = os.path.join(KNOWN_FACES_DIR, f"{name}.jpg")
-                 cv2.imwrite(image_path, frame)
-                 st.success(f"Face registered successfully for {name}")
-                 load_known_faces()  # Refresh known faces
-             cap.release()

  elif menu == "View Records":
      st.title("View Records")
      st.subheader("Recent Records")
      conn = sqlite3.connect(DB_NAME)
      cursor = conn.cursor()
      cursor.execute("SELECT name, emotion, timestamp FROM face_data ORDER BY timestamp DESC LIMIT 5")
 
  import streamlit as st
+ from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
+ import av  # streamlit-webrtc delivers frames as av.VideoFrame objects
  import cv2
  import numpy as np
  from datetime import datetime
  from keras.models import load_model
  import sqlite3
  import os

+ # Database Initialization
  DB_NAME = "emotion_detection.db"

  def initialize_database():
      conn = sqlite3.connect(DB_NAME)
      cursor = conn.cursor()

  initialize_database()

+ # Load emotion detection model
  @st.cache_resource
  def load_emotion_model():
+     return load_model('CNN_Model_acc_75.h5')

  emotion_model = load_emotion_model()
  emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

+ # Video Transformer for Streamlit WebRTC
+ class EmotionDetector(VideoTransformerBase):
+     def __init__(self):
+         self.model = emotion_model
+
+     def transform(self, frame: av.VideoFrame) -> np.ndarray:
+         img = frame.to_ndarray(format="bgr24")
+         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+         faces = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml").detectMultiScale(
+             gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48)
+         )
+
+         for (x, y, w, h) in faces:
+             face = gray[y:y + h, x:x + w]
+             face_resized = cv2.resize(face, (48, 48))
+             face_normalized = face_resized / 255.0
+             face_reshaped = np.reshape(face_normalized, (1, 48, 48, 1))
+
+             prediction = self.model.predict(face_reshaped)
+             emotion = emotion_labels[np.argmax(prediction[0])]
+
+             # Draw bounding box and label
+             cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
+             cv2.putText(img, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+         # Return the annotated BGR array; streamlit-webrtc converts it back into a video frame.
+         return img
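The per-frame pipeline above (grayscale conversion, Haar-cascade face detection, a 48x48 crop normalized to [0, 1], then a prediction over six emotion classes) can be sanity-checked without a browser session. A minimal sketch, assuming the same CNN_Model_acc_75.h5 weights file is available; the input path sample_face.jpg is a placeholder, not part of the commit:

import cv2
import numpy as np
from keras.models import load_model

emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

# Same weights file the app loads.
model = load_model('CNN_Model_acc_75.h5')

# Placeholder input image; substitute any local photo containing a face.
img = cv2.imread('sample_face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Identical detector and parameters to EmotionDetector.transform().
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))

for (x, y, w, h) in faces:
    face = cv2.resize(gray[y:y + h, x:x + w], (48, 48)) / 255.0
    prediction = model.predict(face.reshape(1, 48, 48, 1))
    print(emotion_labels[np.argmax(prediction[0])])

The label order is copied from app.py; if the model was trained with a different class order, the script will still run but print the wrong emotion.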

  # Sidebar menu
+ menu = st.sidebar.selectbox("Menu", ["Home", "View Records"])

  if menu == "Home":
+     st.title("Real-Time Emotion Detection")
+     st.write("Using your camera for real-time emotion detection.")
+
+     webrtc_streamer(
+         key="emotion-detection",
+         video_transformer_factory=EmotionDetector,
+         media_stream_constraints={"video": True, "audio": False},
+     )

  elif menu == "View Records":
      st.title("View Records")
      st.subheader("Recent Records")
+
      conn = sqlite3.connect(DB_NAME)
      cursor = conn.cursor()
      cursor.execute("SELECT name, emotion, timestamp FROM face_data ORDER BY timestamp DESC LIMIT 5")
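When the app runs anywhere other than localhost (for example on Hugging Face Spaces), the WebRTC connection usually needs explicit ICE servers to traverse NAT. streamlit-webrtc exposes this through the rtc_configuration argument of webrtc_streamer. A hedged sketch of the call from the "Home" branch with a public STUN server added; the server URL is only an example and is not something this commit configures:

webrtc_streamer(
    key="emotion-detection",
    video_transformer_factory=EmotionDetector,
    media_stream_constraints={"video": True, "audio": False},
    # Public STUN server so browsers behind NAT can negotiate the stream;
    # production deployments typically supply their own STUN/TURN servers.
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
)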