LovnishVerma committed
Commit 5caf045 · verified · 1 Parent(s): a6e12c0

Update app.py

Files changed (1)
  1. app.py +98 -94
app.py CHANGED
@@ -2,31 +2,20 @@ import sqlite3
 import streamlit as st
 import cv2
 import numpy as np
-import time
 import os
 from keras.models import load_model
+from datetime import datetime
 from PIL import Image
-import tempfile
-
-# Larger title
-st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
-
-# Smaller subtitle
-st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
 
 # Database setup
 DATABASE_NAME = "emotion_recognition.db"
+KNOWN_FACES_DIR = "known_faces"
+if not os.path.exists(KNOWN_FACES_DIR):
+    os.makedirs(KNOWN_FACES_DIR)
 
 def init_db():
     conn = sqlite3.connect(DATABASE_NAME)
     cursor = conn.cursor()
-    cursor.execute('''
-        CREATE TABLE IF NOT EXISTS registered_faces (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            name TEXT NOT NULL,
-            image BLOB NOT NULL
-        )
-    ''')
     cursor.execute('''
         CREATE TABLE IF NOT EXISTS attendance_log (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -40,21 +29,6 @@ def init_db():
 
 init_db()
 
-def register_face(name, image):
-    conn = sqlite3.connect(DATABASE_NAME)
-    cursor = conn.cursor()
-    cursor.execute("INSERT INTO registered_faces (name, image) VALUES (?, ?)", (name, image))
-    conn.commit()
-    conn.close()
-
-def fetch_registered_faces():
-    conn = sqlite3.connect(DATABASE_NAME)
-    cursor = conn.cursor()
-    cursor.execute("SELECT id, name FROM registered_faces")
-    rows = cursor.fetchall()
-    conn.close()
-    return rows
-
 def log_attendance(name, emotion):
     conn = sqlite3.connect(DATABASE_NAME)
     cursor = conn.cursor()
@@ -70,41 +44,77 @@ def fetch_recent_activity():
     conn.close()
     return rows
 
-# Load the emotion model
+# Load pre-trained emotion detection model
 @st.cache_resource
 def load_emotion_model():
     model = load_model('CNN_Model_acc_75.h5')
     return model
 
-model = load_emotion_model()
-
-# Emotion labels
+emotion_model = load_emotion_model()
 emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
 
+# Initialize LBPH face recognizer
+face_recognizer = cv2.face.LBPHFaceRecognizer_create()
+
+def train_recognizer():
+    faces = []
+    labels = []
+    for name in os.listdir(KNOWN_FACES_DIR):
+        for filename in os.listdir(os.path.join(KNOWN_FACES_DIR, name)):
+            filepath = os.path.join(KNOWN_FACES_DIR, name, filename)
+            image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
+            faces.append(image)
+            labels.append(name)
+    label_ids = {name: idx for idx, name in enumerate(set(labels))}
+    label_ids_rev = {idx: name for name, idx in label_ids.items()}
+    labels = [label_ids[label] for label in labels]
+    face_recognizer.train(faces, np.array(labels))
+    return label_ids_rev
+
+label_ids_rev = train_recognizer()
+
 # Sidebar options
-sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Registered Faces", "Recent Activity"])
+sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Recent Activity"])
 
+# Main App Logic
 if sidebar_choice == "Register New Face":
     st.header("Register New Face")
     name = st.text_input("Enter Name")
-    uploaded_image = st.file_uploader("Upload Face Image", type=["png", "jpg", "jpeg"])
-    if name and uploaded_image:
-        image = np.array(Image.open(uploaded_image))
-        _, buffer = cv2.imencode('.jpg', image)
-        register_face(name, buffer.tobytes())
-        st.success(f"Successfully registered {name}!")
-
-elif sidebar_choice == "View Registered Faces":
-    st.header("Registered Faces")
-    faces = fetch_registered_faces()
-    if faces:
-        for face_id, name in faces:
-            st.write(f"ID: {face_id}, Name: {name}")
-    else:
-        st.write("No faces registered yet.")
-
-elif sidebar_choice == "Recent Activity":
-    st.header("Recent Activity (Attendance Log)")
+    capture_button = st.button("Capture Face via Camera")
+    if capture_button and name:
+        cap = cv2.VideoCapture(0)
+        st.write("Capturing face... Look into the camera.")
+        captured_faces = []
+        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+        while len(captured_faces) < 5:
+            ret, frame = cap.read()
+            if not ret:
+                st.error("Error capturing video")
+                break
+            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
+            for (x, y, w, h) in faces:
+                face_roi = gray_frame[y:y + h, x:x + w]
+                captured_faces.append(face_roi)
+                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
+            cv2.imshow("Face Registration", frame)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+        cap.release()
+        cv2.destroyAllWindows()
+
+        # Save faces
+        person_dir = os.path.join(KNOWN_FACES_DIR, name)
+        if not os.path.exists(person_dir):
+            os.makedirs(person_dir)
+        for i, face in enumerate(captured_faces):
+            cv2.imwrite(os.path.join(person_dir, f"{name}_{i}.jpg"), face)
+        label_ids_rev = train_recognizer()
+        st.success(f"{name} has been registered successfully!")
+
+elif sidebar_choice == "View Recent Activity":
+    st.header("Recent Activity")
     logs = fetch_recent_activity()
     if logs:
         for name, emotion, timestamp in logs:
@@ -113,58 +123,52 @@ elif sidebar_choice == "Recent Activity":
         st.write("No recent activity found.")
 
 else: # Emotion Detection
-    st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
-
-    upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
+    st.header("Emotion Detection with Face Recognition")
+    mode = st.radio("Choose mode", ["Image", "Camera"])
+    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
 
     def process_frame(frame):
         gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-        faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
+        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
         result_text = ""
         for (x, y, w, h) in faces:
-            roi_gray = gray_frame[y:y+h, x:x+w]
-            roi_color = frame[y:y+h, x:x+w]
-            face_roi = cv2.resize(roi_color, (48, 48))
-            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) / 255.0
-            face_roi = np.expand_dims(face_roi, axis=0)
-
-            predictions = model.predict(face_roi)
-            emotion = emotion_labels[np.argmax(predictions[0])]
-
-            label = "Unknown" # Placeholder for face recognition (add later)
+            face_roi = gray_frame[y:y + h, x:x + w]
+            face_resized = cv2.resize(face_roi, (150, 150))
+            label_id, confidence = face_recognizer.predict(face_resized)
+            label = label_ids_rev.get(label_id, "Unknown")
+
+            # Emotion Detection
+            face_color = cv2.resize(frame[y:y + h, x:x + w], (48, 48)) / 255.0
+            face_color = np.expand_dims(cv2.cvtColor(face_color, cv2.COLOR_BGR2RGB), axis=0)
+            emotion_prediction = emotion_model.predict(face_color)
+            emotion = emotion_labels[np.argmax(emotion_prediction[0])]
+
+            # Log Attendance
             log_attendance(label, emotion)
 
+            # Annotate frame
            result_text = f"{label} is feeling {emotion}"
-            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
-            cv2.putText(frame, result_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+            cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
         return frame, result_text
 
-    if upload_choice == "Upload Image":
-        uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
+    if mode == "Image":
+        uploaded_image = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
         if uploaded_image:
            image = np.array(Image.open(uploaded_image))
            frame, result_text = process_frame(image)
-            st.image(frame, caption='Processed Image', use_column_width=True)
-            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
-    elif upload_choice == "Upload Video":
-        uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mkv"])
-        if uploaded_video:
-            with tempfile.NamedTemporaryFile(delete=False) as tfile:
-                tfile.write(uploaded_video.read())
-            video_source = cv2.VideoCapture(tfile.name)
-            while True:
-                ret, frame = video_source.read()
-                if not ret:
-                    break
-                frame, result_text = process_frame(frame)
-                st.image(frame, channels="BGR", use_column_width=True)
-
-    elif upload_choice == "Camera":
-        image = st.camera_input("Take a picture")
-        if image:
-            frame = np.array(Image.open(image))
+            st.image(frame, caption=result_text)
+
+    elif mode == "Camera":
+        cap = cv2.VideoCapture(0)
+        st.write("Press 'q' to exit.")
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
            frame, result_text = process_frame(frame)
-            st.image(frame, caption='Processed Image', use_column_width=True)
-            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
+            cv2.imshow("Emotion Detection", frame)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+        cap.release()
+        cv2.destroyAllWindows()
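Two review notes on the new LBPH recognizer. `cv2.face.LBPHFaceRecognizer_create()` comes from OpenCV's contrib modules, so the app needs the `opencv-contrib-python` package; the plain `opencv-python` wheel has no `cv2.face`. Also, `train_recognizer()` runs at import time, and LBPH `train()` raises `cv2.error` on an empty sample set, so a fresh deployment with an empty `known_faces/` crashes before anyone can register; a stray non-directory entry such as `.DS_Store` breaks the `os.listdir` walk the same way. A minimal defensive sketch, reusing the commit's `KNOWN_FACES_DIR` and `face_recognizer` names (the guards, the `int32` labels, and the fixed training size are assumptions, not committed code):

```python
import os
import cv2
import numpy as np

KNOWN_FACES_DIR = "known_faces"
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)

# cv2.face is only present in opencv-contrib-python builds
face_recognizer = cv2.face.LBPHFaceRecognizer_create()

def train_recognizer():
    faces, labels = [], []
    for name in sorted(os.listdir(KNOWN_FACES_DIR)):
        person_dir = os.path.join(KNOWN_FACES_DIR, name)
        if not os.path.isdir(person_dir):  # skip stray files like .DS_Store
            continue
        for filename in os.listdir(person_dir):
            image = cv2.imread(os.path.join(person_dir, filename), cv2.IMREAD_GRAYSCALE)
            if image is None:  # unreadable or non-image file
                continue
            faces.append(cv2.resize(image, (150, 150)))  # match the predict() crop size
            labels.append(name)
    if not faces:  # train() raises cv2.error on an empty list
        return {}
    label_ids = {n: i for i, n in enumerate(sorted(set(labels)))}
    face_recognizer.train(faces, np.array([label_ids[l] for l in labels], dtype=np.int32))
    return {i: n for n, i in label_ids.items()}
```

Returning an empty mapping keeps `label_ids_rev.get(label_id, "Unknown")` usable before the first registration, and resizing the training crops to the 150×150 used at `predict()` time keeps the samples consistent.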
 
 
 
 
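One likely runtime bug in the new `process_frame`: the emotion branch scales the BGR crop to floats first (`cv2.resize(...) / 255.0` yields `float64`) and only then calls `cv2.cvtColor` on it, but `cvtColor` does not support 64-bit float input, so that line raises `cv2.error` as written. Converting the colour space while the crop is still `uint8` and scaling afterwards as `float32` avoids this. A sketch of the corrected order, keeping the commit's 48×48 RGB, [0, 1] pipeline (whether that matches how `CNN_Model_acc_75.h5` was trained is an assumption):

```python
import cv2
import numpy as np

def preprocess_face(bgr_crop):
    """Prepare a BGR face crop for the emotion CNN."""
    rgb = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2RGB)             # convert while still uint8
    rgb = cv2.resize(rgb, (48, 48)).astype(np.float32) / 255.0  # then scale to [0, 1]
    return np.expand_dims(rgb, axis=0)                          # shape (1, 48, 48, 3)
```

Inside the loop this would read `emotion_prediction = emotion_model.predict(preprocess_face(frame[y:y + h, x:x + w]))`.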
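Finally, the new camera paths call `cv2.VideoCapture(0)` and `cv2.imshow`, which run on the machine hosting the Streamlit process. On a remote host such as a Hugging Face Space there is no webcam at device index 0 and no display for `imshow`, so both the registration capture and the "Camera" mode only work when the app runs locally. The previous revision's `st.camera_input` captures in the visitor's browser instead. A sketch of a browser-side camera branch, assuming the `process_frame` defined in this commit is in scope (note that `st.camera_input` yields an RGB image while `process_frame` expects BGR):

```python
import cv2
import numpy as np
import streamlit as st
from PIL import Image

snapshot = st.camera_input("Take a picture")
if snapshot:
    rgb = np.array(Image.open(snapshot).convert("RGB"))  # browser capture arrives as RGB
    bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)           # process_frame assumes BGR input
    annotated, result_text = process_frame(bgr)
    st.image(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB), caption=result_text)
```

This keeps capture client-side, which is the only path that works on a hosted deployment; the committed `cv2.VideoCapture`/`cv2.imshow` loop is fine for purely local runs.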