LovnishVerma committed on
Commit
3b6bd75
·
verified ·
1 Parent(s): 17d4b7a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -76
app.py CHANGED
@@ -1,88 +1,106 @@
1
  import streamlit as st
2
- from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, VideoFrame
3
  import cv2
 
4
  import numpy as np
5
- from datetime import datetime
6
- from keras.models import load_model
7
- import sqlite3
8
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- # Database Initialization
11
- DB_NAME = "emotion_detection.db"
12
-
13
- def initialize_database():
14
- conn = sqlite3.connect(DB_NAME)
15
- cursor = conn.cursor()
16
- cursor.execute("""
17
- CREATE TABLE IF NOT EXISTS face_data (
18
- id INTEGER PRIMARY KEY AUTOINCREMENT,
19
- name TEXT NOT NULL,
20
- emotion TEXT NOT NULL,
21
- timestamp TEXT NOT NULL
22
- )
23
- """)
24
- conn.commit()
25
- conn.close()
26
-
27
- initialize_database()
28
-
29
- # Load emotion detection model
30
- @st.cache_resource
31
- def load_emotion_model():
32
- return load_model('CNN_Model_acc_75.h5')
33
-
34
- emotion_model = load_emotion_model()
35
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
36
-
37
- # Video Transformer for Streamlit WebRTC
38
- class EmotionDetector(VideoTransformerBase):
39
  def __init__(self):
40
- self.model = emotion_model
 
 
 
 
 
 
41
 
42
- def transform(self, frame: VideoFrame) -> VideoFrame:
 
 
 
 
43
  img = frame.to_ndarray(format="bgr24")
44
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
45
- faces = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml").detectMultiScale(
46
- gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48)
47
- )
48
 
 
 
 
 
49
  for (x, y, w, h) in faces:
50
- face = gray[y:y + h, x:x + w]
51
- face_resized = cv2.resize(face, (48, 48))
52
- face_normalized = face_resized / 255.0
53
- face_reshaped = np.reshape(face_normalized, (1, 48, 48, 1))
54
-
55
- prediction = self.model.predict(face_reshaped)
56
- emotion = emotion_labels[np.argmax(prediction[0])]
57
-
58
- # Draw bounding box and label
59
  cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
60
- cv2.putText(img, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
61
-
62
- return VideoFrame.from_ndarray(img, format="bgr24")
63
-
64
- # Sidebar menu
65
- menu = st.sidebar.selectbox("Menu", ["Home", "View Records"])
66
-
67
- if menu == "Home":
68
- st.title("Real-Time Emotion Detection")
69
- st.write("Using your camera for real-time emotion detection.")
70
-
71
- webrtc_streamer(
72
- key="emotion-detection",
73
- video_transformer_factory=EmotionDetector,
74
- media_stream_constraints={"video": True, "audio": False},
75
- )
76
-
77
- elif menu == "View Records":
78
- st.title("View Records")
79
- st.subheader("Recent Records")
80
-
81
- conn = sqlite3.connect(DB_NAME)
82
- cursor = conn.cursor()
83
- cursor.execute("SELECT name, emotion, timestamp FROM face_data ORDER BY timestamp DESC LIMIT 5")
84
- records = cursor.fetchall()
85
- conn.close()
86
-
87
- for record in records:
88
- st.write(f"**Name**: {record[0]}, **Emotion**: {record[1]}, **Timestamp**: {record[2]}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
3
  import cv2
4
+ import av
5
  import numpy as np
 
 
 
6
  import os
7
+ from datetime import datetime
8
+
9
# Directory where registered face crops are written by VideoProcessor.recv.
KNOWN_FACES_DIR = "known_faces"
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)

# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(page_title="Face Detection App", layout="wide")

# Title and Description
st.title("Face Detection and Registration App")
st.write("""
This app allows you to:
1. Detect faces in real-time using your webcam.
2. Register new faces with names.
3. View the list of registered faces.
4. Show recent activity logs.
""")

# NOTE: the activity log lives in st.session_state.activity_log (created
# further down, before any mode runs). The original module-level
# `activity_log = []` list was dead code — nothing ever read it — so it
# has been removed.
28
+
29
# Define Video Processor
class VideoProcessor(VideoTransformerBase):
    """Draw boxes around detected faces on each WebRTC frame.

    When registration mode is armed via set_registering_mode(), the first
    face detected on a subsequent frame is cropped and saved to
    KNOWN_FACES_DIR as ``<name>_<timestamp>.jpg`` (one-shot).
    """

    def __init__(self):
        # Haar cascade bundled with OpenCV for frontal-face detection.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
        )
        self.registering_face = False  # True while a one-shot save is pending
        self.face_name = None          # name stamped into the saved filename

    def set_registering_mode(self, name):
        """Arm one-shot registration: save the next detected face as *name*."""
        self.registering_face = True
        self.face_name = name

    def stop_registering(self):
        """Cancel a pending registration without saving anything."""
        self.registering_face = False
        self.face_name = None

    def recv(self, frame):
        """Per-frame callback invoked by streamlit-webrtc.

        Returns the frame with green rectangles drawn around detected faces.
        """
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Detect faces
        faces = self.face_cascade.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(50, 50)
        )

        # Draw rectangles and save one face image if in registering mode.
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            if self.registering_face:
                face_img = img[y:y + h, x:x + w]
                # SECURITY FIX: self.face_name is user input — strip anything
                # that is not alphanumeric/-/_ so it cannot traverse out of
                # KNOWN_FACES_DIR or produce an invalid filename.
                safe_name = "".join(
                    c for c in (self.face_name or "") if c.isalnum() or c in "-_"
                ) or "face"
                face_path = os.path.join(
                    KNOWN_FACES_DIR,
                    f"{safe_name}_{datetime.now().strftime('%Y%m%d%H%M%S')}.jpg",
                )
                cv2.imwrite(face_path, face_img)
                self.registering_face = False  # one-shot: only the first face is saved
                # BUG FIX: recv() runs on streamlit-webrtc's worker thread,
                # where st.session_state is generally not attached to a script
                # run context — the original unguarded append could raise and
                # kill the video stream. Best-effort log; never break the feed.
                try:
                    st.session_state.activity_log.append(
                        f"Face registered: {self.face_name} at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
                    )
                except Exception:
                    pass  # session_state unavailable off the script thread

        return av.VideoFrame.from_ndarray(img, format="bgr24")
62
+
63
+
64
# Sidebar for navigation
st.sidebar.title("Options")
app_mode = st.sidebar.radio(
    "Choose the mode:",
    ["Real-time Face Detection", "Register a New Face", "View Registered Faces", "Recent Activity"],
)

# Global activity log state — created before any mode can touch it.
if "activity_log" not in st.session_state:
    st.session_state.activity_log = []

# Handle the selected mode
if app_mode == "Real-time Face Detection":
    st.header("Real-time Face Detection")
    webrtc_streamer(key="face_detection", video_processor_factory=VideoProcessor)

elif app_mode == "Register a New Face":
    st.header("Register a New Face")
    name = st.text_input("Enter the name for the new face:")
    # BUG FIX: the streamer must be created on EVERY rerun. The original put
    # webrtc_streamer() inside `if st.button(...)`, so on the next script
    # rerun the button evaluated False and the camera widget vanished before
    # a single frame could be processed — registration never completed.
    ctx = webrtc_streamer(key="register_face", video_processor_factory=VideoProcessor)
    if st.button("Start Registration"):
        if not name.strip():
            st.error("Name cannot be empty!")
        elif ctx.video_processor:  # None until the stream has actually started
            ctx.video_processor.set_registering_mode(name)
            st.success(f"Registering face for '{name}'. Please look into the camera.")
        else:
            st.warning("Start the camera first, then press Start Registration.")

elif app_mode == "View Registered Faces":
    st.header("Registered Faces")
    faces = os.listdir(KNOWN_FACES_DIR)
    if faces:
        st.write(f"**Total Faces Registered:** {len(faces)}")
        for face in faces:
            st.write(face)
    else:
        st.write("No faces have been registered yet.")

elif app_mode == "Recent Activity":
    st.header("Recent Activity Log")
    if st.session_state.activity_log:
        # Newest entries first.
        for log in reversed(st.session_state.activity_log):
            st.write(log)
    else:
        st.write("No activity logged yet.")
106
+