LovnishVerma committed
Commit ec5e0a9 · verified · 1 Parent(s): f171e09

Update app.py

Files changed (1)
  1. app.py +90 -193
app.py CHANGED
@@ -1,211 +1,108 @@
import os
import cv2
import numpy as np
- import sqlite3
import streamlit as st
from datetime import datetime
- from PIL import Image
- from keras.models import load_model

- # Constants
- DATABASE_NAME = "emotion_recognition.db"
KNOWN_FACES_DIR = "known_faces"
- if not os.path.exists(KNOWN_FACES_DIR):
-     os.makedirs(KNOWN_FACES_DIR)
-
- # Initialize Database
- def init_db():
-     conn = sqlite3.connect(DATABASE_NAME)
-     cursor = conn.cursor()
-     cursor.execute('''
-         CREATE TABLE IF NOT EXISTS attendance_log (
-             id INTEGER PRIMARY KEY AUTOINCREMENT,
-             name TEXT NOT NULL,
-             emotion TEXT NOT NULL,
-             timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
-         )
-     ''')
-     conn.commit()
-     conn.close()
-
- init_db()
-
- def log_attendance(name, emotion):
-     conn = sqlite3.connect(DATABASE_NAME)
-     cursor = conn.cursor()
-     cursor.execute("INSERT INTO attendance_log (name, emotion) VALUES (?, ?)", (name, emotion))
-     conn.commit()
-     conn.close()

- def fetch_recent_activity():
-     conn = sqlite3.connect(DATABASE_NAME)
-     cursor = conn.cursor()
-     cursor.execute("SELECT name, emotion, timestamp FROM attendance_log ORDER BY timestamp DESC LIMIT 10")
-     rows = cursor.fetchall()
-     conn.close()
-     return rows
-
- # Load Emotion Detection Model
- @st.cache_resource
- def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')
-     return model
-
- emotion_model = load_emotion_model()
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']

- # Initialize LBPH Face Recognizer
face_recognizer = cv2.face.LBPHFaceRecognizer_create()

def train_recognizer():
    faces = []
    labels = []
-     for name in os.listdir(KNOWN_FACES_DIR):
-         person_dir = os.path.join(KNOWN_FACES_DIR, name)
-         if not os.path.isdir(person_dir):
            continue
-         for filename in os.listdir(person_dir):
-             filepath = os.path.join(person_dir, filename)
-             if not filepath.lower().endswith(('.jpg', '.jpeg', '.png')):
-                 continue
-             image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
-             if image is not None:
-                 faces.append(image)
-                 labels.append(name)
-     if not faces or not labels:
-         raise ValueError("No valid training data found in the known faces directory.")
-     label_ids = {name: idx for idx, name in enumerate(set(labels))}
-     label_ids_rev = {idx: name for name, idx in label_ids.items()}
-     labels = [label_ids[label] for label in labels]
    face_recognizer.train(faces, np.array(labels))
-     return label_ids_rev
-
- label_ids_rev = train_recognizer()
-
- # Sidebar Options
- sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Recent Activity"])
-
- # Main App Logic
- if sidebar_choice == "Register New Face":
-     st.header("Register New Face")
-     name = st.text_input("Enter Name")
-     use_camera = st.checkbox("Use Camera to Capture Face")
-     capture_button = st.button("Capture Face" if use_camera else "Upload Image")
-
-     if use_camera and capture_button and name:
-         cap = cv2.VideoCapture(0)
-         st.write("Capturing face... Look into the camera.")
-         captured_faces = []
-         face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-
-         while len(captured_faces) < 5:  # Capture 5 images
-             ret, frame = cap.read()
-             if not ret:
-                 st.error("Error capturing video")
-                 break
-
-             gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-             faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
-
-             for (x, y, w, h) in faces:
-                 face_roi = gray_frame[y:y + h, x:x + w]
-                 captured_faces.append(face_roi)
-                 cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
-
-             cv2.imshow("Face Registration", frame)
-             if cv2.waitKey(1) & 0xFF == ord('q'):
-                 break
-
-         cap.release()
-         cv2.destroyAllWindows()
-
-         if captured_faces:
-             person_dir = os.path.join(KNOWN_FACES_DIR, name)
-             if not os.path.exists(person_dir):
-                 os.makedirs(person_dir)
-             for i, face in enumerate(captured_faces):
-                 cv2.imwrite(os.path.join(person_dir, f"{name}_{i}.jpg"), face)
-
-             label_ids_rev = train_recognizer()
-             st.success(f"{name} has been registered successfully with {len(captured_faces)} captured images!")
-         else:
-             st.warning("No faces captured. Please try again.")
-
-     elif not use_camera and capture_button and name:
-         uploaded_image = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
-         if uploaded_image:
-             image = np.array(Image.open(uploaded_image))
-             gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-             face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-             faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
-
-             if len(faces) > 0:
-                 person_dir = os.path.join(KNOWN_FACES_DIR, name)
-                 if not os.path.exists(person_dir):
-                     os.makedirs(person_dir)
                for (x, y, w, h) in faces:
-                     face_roi = gray_image[y:y + h, x:x + w]
-                     face_filename = os.path.join(person_dir, f"{name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg")
-                     cv2.imwrite(face_filename, face_roi)
-                 label_ids_rev = train_recognizer()
-                 st.success(f"Face for {name} has been registered successfully!")
-             else:
-                 st.warning("No face detected. Try another image.")
-         else:
-             st.warning("Please upload an image.")
-
- elif sidebar_choice == "View Recent Activity":
-     st.header("Recent Activity")
-     logs = fetch_recent_activity()
-     if logs:
-         for name, emotion, timestamp in logs:
-             st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
-     else:
-         st.write("No recent activity found.")
-
- else:
-     st.header("Emotion Detection with Face Recognition")
-     mode = st.radio("Choose mode", ["Image", "Camera"])
-     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-
-     def process_frame(frame):
-         gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-         faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
-         result_text = ""
        for (x, y, w, h) in faces:
-             face_roi = gray_frame[y:y + h, x:x + w]
-             face_resized = cv2.resize(face_roi, (150, 150))
-             label_id, confidence = face_recognizer.predict(face_resized)
-             label = label_ids_rev.get(label_id, "Unknown")
-
-             # Emotion Detection
-             face_color = cv2.resize(frame[y:y + h, x:x + w], (48, 48)) / 255.0
-             face_color = np.expand_dims(cv2.cvtColor(face_color, cv2.COLOR_BGR2RGB), axis=0)
-             emotion_prediction = emotion_model.predict(face_color)
-             emotion = emotion_labels[np.argmax(emotion_prediction[0])]
-
-             log_attendance(label, emotion)
-             result_text = f"{label} is feeling {emotion}"
-             cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-             cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-         return frame, result_text
-
-     if mode == "Image":
-         uploaded_image = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
-         if uploaded_image:
-             image = np.array(Image.open(uploaded_image))
-             frame, result_text = process_frame(image)
-             st.image(frame, caption=result_text)
-     elif mode == "Camera":
-         cap = cv2.VideoCapture(0)
-         st.write("Press 'q' to exit.")
-         while True:
-             ret, frame = cap.read()
-             if not ret:
-                 break
-             frame, result_text = process_frame(frame)
-             cv2.imshow("Emotion Detection", frame)
-             if cv2.waitKey(1) & 0xFF == ord('q'):
-                 break
-         cap.release()
-         cv2.destroyAllWindows()
import os
import cv2
import numpy as np
import streamlit as st
from datetime import datetime
+ from tensorflow.keras.models import load_model

+ # Directories
KNOWN_FACES_DIR = "known_faces"
+ EMOTION_MODEL_PATH = "emotion_model.h5"
+ CASCADE_PATH = "haarcascade_frontalface_default.xml"
+ # Make sure the training directory exists on first run; os.listdir in
+ # train_recognizer would otherwise raise FileNotFoundError.
+ os.makedirs(KNOWN_FACES_DIR, exist_ok=True)

+ # Constants
+ IMG_SIZE = (200, 200)

+ # Load models
+ emotion_model = load_model(EMOTION_MODEL_PATH)
+ face_cascade = cv2.CascadeClassifier(CASCADE_PATH)
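+ # NOTE: CASCADE_PATH assumes the Haar cascade XML sits next to app.py. If it
+ # is not checked into the repo, OpenCV's bundled copy is a safe fallback:
+ #   face_cascade = cv2.CascadeClassifier(
+ #       cv2.data.haarcascades + "haarcascade_frontalface_default.xml")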
face_recognizer = cv2.face.LBPHFaceRecognizer_create()

+ # Helper Functions
+ def load_emotion_labels():
+     return ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
+
def train_recognizer():
    faces = []
    labels = []
+     label_map = {}
+     for idx, person_name in enumerate(os.listdir(KNOWN_FACES_DIR)):
+         person_path = os.path.join(KNOWN_FACES_DIR, person_name)
+         if not os.path.isdir(person_path):
            continue
+         label_map[idx] = person_name
+         for filename in os.listdir(person_path):
+             filepath = os.path.join(person_path, filename)
+             if filepath.lower().endswith(('.jpg', '.jpeg', '.png')):
+                 img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
+                 if img is not None:
+                     faces.append(img)
+                     labels.append(idx)
+     if len(faces) == 0:
+         st.warning("No valid training data found. Add faces first.")
+         return {}
    face_recognizer.train(faces, np.array(labels))
+     # Return the id -> name map unchanged: predict() yields the numeric id,
+     # so reversing it to name -> id would make every lookup below miss.
+     return label_map
+
+ def detect_faces(image):
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
+     return gray, faces
+
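+ # NOTE (assumption): detect_emotions expects the model at EMOTION_MODEL_PATH
+ # to take 48x48 single-channel input; np.expand_dims(..., axis=(0, -1))
+ # produces a batch of shape (1, 48, 48, 1).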
+ def detect_emotions(face_img):
+     resized_face = cv2.resize(face_img, (48, 48))
+     normalized_face = resized_face / 255.0
+     reshaped_face = np.expand_dims(normalized_face, axis=(0, -1))
+     emotion_probabilities = emotion_model.predict(reshaped_face)
+     emotion_idx = np.argmax(emotion_probabilities)
+     return load_emotion_labels()[emotion_idx]
+
+ # Streamlit App
+ st.title("Face Recognition and Emotion Detection")
+ st.sidebar.title("Options")
+ option = st.sidebar.selectbox("Choose an action", ["Home", "Register New Face", "Recognize Faces"])
+
+ # Train the recognizer initially
+ if option != "Register New Face":
+     label_map = train_recognizer()
+
+ if option == "Home":
+     st.write("Use the sidebar to register new faces or recognize them.")
+
+ elif option == "Register New Face":
+     person_name = st.text_input("Enter the person's name")
+     capture_mode = st.radio("Select input method", ["Use Camera", "Upload Image"])
+
+     if capture_mode == "Use Camera":
+         st.warning("Switch to a device with a camera for this option.")
+     elif capture_mode == "Upload Image":
+         # Render the uploader before the button: a widget created inside a
+         # button branch disappears on the rerun that follows the click.
+         uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+         if person_name and uploaded_file and st.button("Register Face"):
+             person_dir = os.path.join(KNOWN_FACES_DIR, person_name)
+             os.makedirs(person_dir, exist_ok=True)
+             img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
+             gray, faces = detect_faces(img)
+             for i, (x, y, w, h) in enumerate(faces):
+                 face_img = gray[y:y+h, x:x+w]
+                 resized_img = cv2.resize(face_img, IMG_SIZE)
+                 timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+                 # Index the filename so several faces in one image don't overwrite each other.
+                 filepath = os.path.join(person_dir, f"{timestamp}_{i}.jpg")
+                 cv2.imwrite(filepath, resized_img)
+             st.success("Face registered successfully!")
+             label_map = train_recognizer()
+
+ elif option == "Recognize Faces":
+     uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+     if uploaded_file:
+         img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
+         gray, faces = detect_faces(img)
        for (x, y, w, h) in faces:
+             face_img = gray[y:y+h, x:x+w]
+             resized_img = cv2.resize(face_img, IMG_SIZE)
+             # LBPH predict() returns (label, confidence); lower confidence means a closer match.
+             label, confidence = face_recognizer.predict(resized_img)
+             name = label_map.get(label, "Unknown")
+             emotion = detect_emotions(face_img)
+             cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
+             cv2.putText(img, f"{name}, {emotion}", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+         st.image(img, channels="BGR")
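
The committed code computes an LBPH confidence for every face but never uses it, so each detection is labeled with the nearest registered person no matter how poor the match. Below is a minimal sketch of how the recognition loop could reject weak matches; predict_name and the 70.0 threshold are illustrative assumptions (LBPH reports lower values for closer matches), not part of this commit:

    # Hypothetical helper, not in this commit: treat weak LBPH matches as Unknown.
    def predict_name(face_recognizer, label_map, face_img, threshold=70.0):
        label, confidence = face_recognizer.predict(face_img)
        if confidence < threshold:
            return label_map.get(label, "Unknown"), confidence
        return "Unknown", confidence

The app runs with "streamlit run app.py", assuming emotion_model.h5 and the Haar cascade XML are present alongside app.py.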