LovnishVerma committed on
Commit
56a50b1
·
verified ·
1 Parent(s): 37b9e8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -164
app.py CHANGED
@@ -1,170 +1,98 @@
1
- import sqlite3
2
- import streamlit as st
3
- import cv2
4
- import numpy as np
5
- import time
6
  import os
7
- from keras.models import load_model
8
- from PIL import Image
9
- import tempfile
10
-
11
- # Larger title
12
- st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
13
-
14
- # Smaller subtitle
15
- st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
16
-
17
- # Database setup
18
- DATABASE_NAME = "emotion_recognition.db"
19
-
20
- def init_db():
21
- conn = sqlite3.connect(DATABASE_NAME)
22
- cursor = conn.cursor()
23
- cursor.execute('''
24
- CREATE TABLE IF NOT EXISTS registered_faces (
25
- id INTEGER PRIMARY KEY AUTOINCREMENT,
26
- name TEXT NOT NULL,
27
- image BLOB NOT NULL
28
- )
29
- ''')
30
- cursor.execute('''
31
- CREATE TABLE IF NOT EXISTS attendance_log (
32
- id INTEGER PRIMARY KEY AUTOINCREMENT,
33
- name TEXT NOT NULL,
34
- emotion TEXT NOT NULL,
35
- timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
36
- )
37
- ''')
38
- conn.commit()
39
- conn.close()
40
-
41
- init_db()
42
-
43
- def register_face(name, image):
44
- conn = sqlite3.connect(DATABASE_NAME)
45
- cursor = conn.cursor()
46
- cursor.execute("INSERT INTO registered_faces (name, image) VALUES (?, ?)", (name, image))
47
- conn.commit()
48
- conn.close()
49
-
50
- def fetch_registered_faces():
51
- conn = sqlite3.connect(DATABASE_NAME)
52
- cursor = conn.cursor()
53
- cursor.execute("SELECT id, name FROM registered_faces")
54
- rows = cursor.fetchall()
55
- conn.close()
56
- return rows
57
-
58
- def log_attendance(name, emotion):
59
- conn = sqlite3.connect(DATABASE_NAME)
60
- cursor = conn.cursor()
61
- cursor.execute("INSERT INTO attendance_log (name, emotion) VALUES (?, ?)", (name, emotion))
62
- conn.commit()
63
- conn.close()
64
-
65
- def fetch_recent_activity():
66
- conn = sqlite3.connect(DATABASE_NAME)
67
- cursor = conn.cursor()
68
- cursor.execute("SELECT name, emotion, timestamp FROM attendance_log ORDER BY timestamp DESC LIMIT 10")
69
- rows = cursor.fetchall()
70
- conn.close()
71
- return rows
72
-
73
- # Load the emotion model
74
- @st.cache_resource
75
- def load_emotion_model():
76
- model = load_model('CNN_Model_acc_75.h5')
77
- return model
78
-
79
- model = load_emotion_model()
80
-
81
- # Emotion labels
82
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
83
-
84
- # Sidebar options
85
- sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Registered Faces", "Recent Activity"])
86
-
87
- if sidebar_choice == "Register New Face":
88
- st.header("Register New Face")
89
- name = st.text_input("Enter Name")
90
- uploaded_image = st.file_uploader("Upload Face Image", type=["png", "jpg", "jpeg"])
91
- if name and uploaded_image:
92
- image = np.array(Image.open(uploaded_image))
93
- _, buffer = cv2.imencode('.jpg', image)
94
- register_face(name, buffer.tobytes())
95
- st.success(f"Successfully registered {name}!")
96
-
97
- elif sidebar_choice == "View Registered Faces":
98
- st.header("Registered Faces")
99
- faces = fetch_registered_faces()
100
- if faces:
101
- for face_id, name in faces:
102
- st.write(f"ID: {face_id}, Name: {name}")
103
- else:
104
- st.write("No faces registered yet.")
105
-
106
- elif sidebar_choice == "Recent Activity":
107
- st.header("Recent Activity (Attendance Log)")
108
- logs = fetch_recent_activity()
109
- if logs:
110
- for name, emotion, timestamp in logs:
111
- st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
112
- else:
113
- st.write("No recent activity found.")
114
 
115
- else: # Emotion Detection
116
- st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
117
 
118
- upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
 
 
119
 
120
- def process_frame(frame):
121
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
122
- faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
 
 
 
 
 
 
 
 
 
 
 
123
 
124
- result_text = ""
125
  for (x, y, w, h) in faces:
126
- roi_gray = gray_frame[y:y+h, x:x+w]
127
- roi_color = frame[y:y+h, x:x+w]
128
- face_roi = cv2.resize(roi_color, (48, 48))
129
- face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) / 255.0
130
- face_roi = np.expand_dims(face_roi, axis=0)
131
-
132
- predictions = model.predict(face_roi)
133
- emotion = emotion_labels[np.argmax(predictions[0])]
134
-
135
- label = "Unknown" # Placeholder for face recognition (add later)
136
- log_attendance(label, emotion)
137
-
138
- result_text = f"{label} is feeling {emotion}"
139
- cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
140
- cv2.putText(frame, result_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
141
- return frame, result_text
142
-
143
- if upload_choice == "Upload Image":
144
- uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
145
- if uploaded_image:
146
- image = np.array(Image.open(uploaded_image))
147
- frame, result_text = process_frame(image)
148
- st.image(frame, caption='Processed Image', use_column_width=True)
149
- st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
150
-
151
- elif upload_choice == "Upload Video":
152
- uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mkv"])
153
- if uploaded_video:
154
- with tempfile.NamedTemporaryFile(delete=False) as tfile:
155
- tfile.write(uploaded_video.read())
156
- video_source = cv2.VideoCapture(tfile.name)
157
- while True:
158
- ret, frame = video_source.read()
159
- if not ret:
160
- break
161
- frame, result_text = process_frame(frame)
162
- st.image(frame, channels="BGR", use_column_width=True)
163
-
164
- elif upload_choice == "Camera":
165
- image = st.camera_input("Take a picture")
166
- if image:
167
- frame = np.array(Image.open(image))
168
- frame, result_text = process_frame(frame)
169
- st.image(frame, caption='Processed Image', use_column_width=True)
170
- st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import cv2
3
+ import streamlit as st
4
+ from datetime import datetime
5
+
6
+ # Constants
7
+ KNOWN_FACES_DIR = "known_faces"
8
+ os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
9
+
10
+ # Global State
11
+ if "activity_log" not in st.session_state:
12
+ st.session_state.activity_log = []
13
+
14
+ # Streamlit App Title
15
+ st.title("Face Detection and Registration App")
16
+
17
+ st.sidebar.header("Navigation")
18
+ page = st.sidebar.radio("Choose an option:", ["Real-time Face Detection", "Register New Face", "View Registered Faces", "Recent Activity"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
+ # Initialize Cascade Classifier
21
+ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
22
 
23
+ # Helper Function for Real-time Face Detection
24
+ def start_camera(register_mode=False, face_name=None):
25
+ cap = cv2.VideoCapture(0) # Open the camera
26
 
27
+ if not cap.isOpened():
28
+ st.error("Unable to access the camera. Please check your webcam or permissions.")
29
+ return
30
+
31
+ stframe = st.empty() # Streamlit placeholder for video frames
32
+
33
+ while cap.isOpened():
34
+ ret, frame = cap.read()
35
+ if not ret:
36
+ st.error("Failed to grab frame from camera.")
37
+ break
38
+
39
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
40
+ faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(50, 50))
41
 
 
42
  for (x, y, w, h) in faces:
43
+ # Draw rectangle
44
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
45
+
46
+ if register_mode and face_name:
47
+ # Save the cropped face image
48
+ face_img = frame[y:y + h, x:x + w]
49
+ face_filename = f"{face_name}_{datetime.now().strftime('%Y%m%d%H%M%S')}.jpg"
50
+ face_path = os.path.join(KNOWN_FACES_DIR, face_filename)
51
+ cv2.imwrite(face_path, face_img)
52
+
53
+ st.session_state.activity_log.append(
54
+ f"Registered face: {face_name} at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
55
+ )
56
+ st.success(f"Face registered successfully as {face_name}!")
57
+ return # Stop after registering
58
+
59
+ stframe.image(frame, channels="BGR", use_column_width=True)
60
+
61
+ cap.release()
62
+ cv2.destroyAllWindows()
63
+
64
+ # Pages
65
+ if page == "Real-time Face Detection":
66
+ st.header("Real-time Face Detection")
67
+ if st.button("Start Camera"):
68
+ start_camera()
69
+
70
+ elif page == "Register New Face":
71
+ st.header("Register New Face")
72
+ name = st.text_input("Enter a name for the face:")
73
+
74
+ if st.button("Start Registration"):
75
+ if name.strip():
76
+ st.info(f"Looking for face to register as '{name}'...")
77
+ start_camera(register_mode=True, face_name=name)
78
+ else:
79
+ st.error("Please enter a valid name!")
80
+
81
+ elif page == "View Registered Faces":
82
+ st.header("Registered Faces")
83
+ faces = os.listdir(KNOWN_FACES_DIR)
84
+
85
+ if faces:
86
+ st.write(f"*Total Registered Faces: {len(faces)}*")
87
+ for face_file in faces:
88
+ st.write(face_file)
89
+ else:
90
+ st.write("No registered faces found.")
91
+
92
+ elif page == "Recent Activity":
93
+ st.header("Recent Activity Log")
94
+ if st.session_state.activity_log:
95
+ for log in reversed(st.session_state.activity_log):
96
+ st.write(log)
97
+ else:
98
+ st.write("No recent activity to display.")