LovnishVerma committed · verified · Commit 4bc696c · 1 Parent(s): e89a8b7

Update app.py

Files changed (1): app.py  +144 -117
app.py CHANGED
@@ -1,3 +1,4 @@
+import sqlite3
 import streamlit as st
 import cv2
 import numpy as np
@@ -13,131 +14,157 @@ st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recogni
 # Smaller subtitle
 st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
 
-start = time.time()
+# Database setup
+DATABASE_NAME = "emotion_recognition.db"
+
+def init_db():
+    conn = sqlite3.connect(DATABASE_NAME)
+    cursor = conn.cursor()
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS registered_faces (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            name TEXT NOT NULL,
+            image BLOB NOT NULL
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS attendance_log (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            name TEXT NOT NULL,
+            emotion TEXT NOT NULL,
+            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+        )
+    ''')
+    conn.commit()
+    conn.close()
+
+init_db()
+
+def register_face(name, image):
+    conn = sqlite3.connect(DATABASE_NAME)
+    cursor = conn.cursor()
+    cursor.execute("INSERT INTO registered_faces (name, image) VALUES (?, ?)", (name, image))
+    conn.commit()
+    conn.close()
+
+def fetch_registered_faces():
+    conn = sqlite3.connect(DATABASE_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT id, name FROM registered_faces")
+    rows = cursor.fetchall()
+    conn.close()
+    return rows
+
+def log_attendance(name, emotion):
+    conn = sqlite3.connect(DATABASE_NAME)
+    cursor = conn.cursor()
+    cursor.execute("INSERT INTO attendance_log (name, emotion) VALUES (?, ?)", (name, emotion))
+    conn.commit()
+    conn.close()
+
+def fetch_recent_activity():
+    conn = sqlite3.connect(DATABASE_NAME)
+    cursor = conn.cursor()
+    cursor.execute("SELECT name, emotion, timestamp FROM attendance_log ORDER BY timestamp DESC LIMIT 10")
+    rows = cursor.fetchall()
+    conn.close()
+    return rows
 
 # Load the emotion model
 @st.cache_resource
 def load_emotion_model():
-    model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
+    model = load_model('CNN_Model_acc_75.h5')
     return model
 
 model = load_emotion_model()
-print("time taken to load model: ", time.time() - start)
 
 # Emotion labels
 emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
 
-# Load known faces (from images in a folder)
-known_faces = []
-known_names = []
-face_recognizer = cv2.face.LBPHFaceRecognizer_create()
-
-def load_known_faces():
-    folder_path = "known_faces"  # Place your folder with known faces here
-    for image_name in os.listdir(folder_path):
-        if image_name.endswith(('.jpg', '.jpeg', '.png')):
-            image_path = os.path.join(folder_path, image_name)
-            image = cv2.imread(image_path)
-            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-            # Detect face in the image
-            faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-            for (x, y, w, h) in faces:
-                roi_gray = gray[y:y+h, x:x+w]
-                # We only need the face, so we crop it and store it for training
-                known_faces.append(roi_gray)
-                known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name
-
-    # Train the recognizer with the known faces
-    face_recognizer.train(known_faces, np.array([i for i in range(len(known_faces))]))
-
-load_known_faces()
-
-# Face detection using OpenCV
-face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-img_shape = 48
-
-def process_frame(frame):
-    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-    result_text = ""  # Initialize the result text for display
-
-    for (x, y, w, h) in faces:
-        roi_gray = gray_frame[y:y+h, x:x+w]
-        roi_color = frame[y:y+h, x:x+w]
-        face_roi = cv2.resize(roi_color, (img_shape, img_shape))  # Resize to 48x48
-        face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB)  # Convert to RGB (3 channels)
-        face_roi = np.expand_dims(face_roi, axis=0)  # Add batch dimension
-        face_roi = face_roi / 255.0  # Normalize the image
-
-        # Emotion detection
-        predictions = model.predict(face_roi)
-        emotion = emotion_labels[np.argmax(predictions[0])]
-
-        # Face recognition using LBPH
-        label, confidence = face_recognizer.predict(roi_gray)
-        name = "Unknown"
-        if confidence < 100:
-            name = known_names[label]
-
-        # Format the result text as "Name is feeling Emotion"
-        result_text = f"{name} is feeling {emotion}"
-
-        # Draw bounding box and label on the frame
-        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-        cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-    return frame, result_text
-
-# Video feed
-def video_feed(video_source):
-    frame_placeholder = st.empty()  # This placeholder will be used to replace frames in-place
-    text_placeholder = st.empty()   # This placeholder will display the result text
-
-    while True:
-        ret, frame = video_source.read()
-        if not ret:
-            break
-
-        frame, result_text = process_frame(frame)
-
-        # Display the frame in the placeholder
-        frame_placeholder.image(frame, channels="BGR", use_column_width=True)
-
-        # Display the result text in the text placeholder
-        text_placeholder.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
-# Sidebar for video or image upload
-upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
-
-if upload_choice == "Camera":
-    # Use Streamlit's built-in camera input widget for capturing images from the webcam
-    image = st.camera_input("Take a picture")
-
-    if image is not None:
-        # Convert the image to a numpy array
-        frame = np.array(Image.open(image))
-        frame, result_text = process_frame(frame)
-        st.image(frame, caption='Processed Image', use_column_width=True)
-        st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
-elif upload_choice == "Upload Image":
-    uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
-    if uploaded_image:
-        image = Image.open(uploaded_image)
-        frame = np.array(image)
-        frame, result_text = process_frame(frame)
-        st.image(frame, caption='Processed Image', use_column_width=True)
-        st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
-
-elif upload_choice == "Upload Video":
-    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
-    if uploaded_video:
-        # Temporarily save the video to disk
-        with tempfile.NamedTemporaryFile(delete=False) as tfile:
-            tfile.write(uploaded_video.read())
-            video_source = cv2.VideoCapture(tfile.name)
-            video_feed(video_source)
-
-st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
+# Sidebar options
+sidebar_choice = st.sidebar.selectbox("Choose an option", ["Emotion Detection", "Register New Face", "View Registered Faces", "Recent Activity"])
+
+if sidebar_choice == "Register New Face":
+    st.header("Register New Face")
+    name = st.text_input("Enter Name")
+    uploaded_image = st.file_uploader("Upload Face Image", type=["png", "jpg", "jpeg"])
+    if name and uploaded_image:
+        image = np.array(Image.open(uploaded_image))
+        _, buffer = cv2.imencode('.jpg', image)
+        register_face(name, buffer.tobytes())
+        st.success(f"Successfully registered {name}!")
+
+elif sidebar_choice == "View Registered Faces":
+    st.header("Registered Faces")
+    faces = fetch_registered_faces()
+    if faces:
+        for face_id, name in faces:
+            st.write(f"ID: {face_id}, Name: {name}")
+    else:
+        st.write("No faces registered yet.")
+
+elif sidebar_choice == "Recent Activity":
+    st.header("Recent Activity (Attendance Log)")
+    logs = fetch_recent_activity()
+    if logs:
+        for name, emotion, timestamp in logs:
+            st.write(f"Name: {name}, Emotion: {emotion}, Timestamp: {timestamp}")
+    else:
+        st.write("No recent activity found.")
+
+else:  # Emotion Detection
+    st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
+
+    upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
+
+    def process_frame(frame):
+        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+        result_text = ""
+        for (x, y, w, h) in faces:
+            roi_gray = gray_frame[y:y+h, x:x+w]
+            roi_color = frame[y:y+h, x:x+w]
+            face_roi = cv2.resize(roi_color, (48, 48))
+            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) / 255.0
+            face_roi = np.expand_dims(face_roi, axis=0)
+
+            predictions = model.predict(face_roi)
+            emotion = emotion_labels[np.argmax(predictions[0])]
+
+            label = "Unknown"  # Placeholder for face recognition (add later)
+            log_attendance(label, emotion)
+
+            result_text = f"{label} is feeling {emotion}"
+            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
+            cv2.putText(frame, result_text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        return frame, result_text
+
+    if upload_choice == "Upload Image":
+        uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
+        if uploaded_image:
+            image = np.array(Image.open(uploaded_image))
+            frame, result_text = process_frame(image)
+            st.image(frame, caption='Processed Image', use_column_width=True)
+            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
+
+    elif upload_choice == "Upload Video":
+        uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mkv"])
+        if uploaded_video:
+            with tempfile.NamedTemporaryFile(delete=False) as tfile:
+                tfile.write(uploaded_video.read())
+            video_source = cv2.VideoCapture(tfile.name)
+            while True:
+                ret, frame = video_source.read()
+                if not ret:
+                    break
+                frame, result_text = process_frame(frame)
+                st.image(frame, channels="BGR", use_column_width=True)
+
+    elif upload_choice == "Camera":
+        image = st.camera_input("Take a picture")
+        if image:
+            frame = np.array(Image.open(image))
+            frame, result_text = process_frame(frame)
+            st.image(frame, caption='Processed Image', use_column_width=True)
+            st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
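The new `process_frame` logs every detection as `"Unknown"`, with face recognition explicitly deferred ("add later"). Below is a minimal sketch, not part of this commit, of how the LBPH recognizer from the removed code could be rebuilt from the `registered_faces` blobs this version starts storing. `train_recognizer_from_db` is a hypothetical helper name, and `cv2.face` requires the `opencv-contrib-python` build, just as it did for the removed code:

```python
import sqlite3

import cv2
import numpy as np

DATABASE_NAME = "emotion_recognition.db"  # same file init_db() creates

def train_recognizer_from_db():
    """Hypothetical helper: rebuild an LBPH recognizer from the stored face blobs."""
    conn = sqlite3.connect(DATABASE_NAME)
    rows = conn.execute("SELECT name, image FROM registered_faces").fetchall()
    conn.close()

    names, samples = [], []
    for name, blob in rows:
        # Decode the JPEG bytes written by register_face() back to grayscale,
        # the format LBPH expects for training.
        img = cv2.imdecode(np.frombuffer(blob, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
        if img is not None:
            names.append(name)
            samples.append(img)

    recognizer = cv2.face.LBPHFaceRecognizer_create()
    if samples:
        recognizer.train(samples, np.array(range(len(samples))))
    return recognizer, names
```

With that in place, the placeholder line could become `label_id, confidence = recognizer.predict(roi_gray)` followed by `label = names[label_id] if confidence < 100 else "Unknown"`, mirroring the confidence threshold the removed code used.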
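A channel-order caveat in the new code: `cv2.VideoCapture` yields BGR frames, while `PIL.Image.open` in the Camera and Upload Image branches yields RGB, yet all three paths feed the same BGR-assuming `process_frame`. A one-line conversion at the boundary would keep the model input consistent across paths; this is a suggested fix under that assumption, not something the commit does:

```python
import cv2
import numpy as np
from PIL import Image

# PIL returns RGB; process_frame() assumes BGR (COLOR_BGR2GRAY / COLOR_BGR2RGB).
frame_rgb = np.array(Image.open(uploaded_image).convert("RGB"))
frame_bgr = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)
# frame_bgr is now safe to pass to process_frame(...)
```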
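The removed `video_feed` drew each frame into a single `st.empty()` placeholder so the display updated in place; the new video loop calls `st.image` directly, which appends one image per frame to the page. If in-place playback is still wanted, here is a sketch reusing the old placeholder pattern, assuming `video_source` and `process_frame` from the diff above:

```python
import streamlit as st

frame_placeholder = st.empty()  # reused each iteration, as in the removed video_feed()
while True:
    ret, frame = video_source.read()
    if not ret:
        break
    frame, result_text = process_frame(frame)
    frame_placeholder.image(frame, channels="BGR", use_column_width=True)
```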