File size: 9,207 Bytes
56a50b1 a4d9380 94a1319 a6b4b4a 07efbbd 94a1319 07efbbd 62baa62 07efbbd 56a50b1 4b17a12 07efbbd 62baa62 07efbbd 62baa62 07efbbd 62baa62 6a0f76d 07efbbd a6b4b4a 07efbbd a6b4b4a 07efbbd 62baa62 07efbbd 94a1319 07efbbd c850a8d a6b4b4a 07efbbd a6b4b4a 07efbbd c850a8d a0c7be1 c850a8d a6b4b4a 07efbbd a6b4b4a 07efbbd c850a8d a6b4b4a 6a0f76d c850a8d a0c7be1 a6b4b4a c850a8d a6b4b4a 07efbbd a6b4b4a c850a8d 07efbbd a6b4b4a 07efbbd a6b4b4a 07efbbd a6b4b4a 07efbbd ebc268d a6b4b4a 07efbbd a6b4b4a 07efbbd e4120b8 addbe9f e4120b8 a6b4b4a 07efbbd a6b4b4a 07efbbd addbe9f 07efbbd 94a1319 a6b4b4a 07efbbd a6b4b4a 07efbbd a6b4b4a 07efbbd a6b4b4a 07efbbd a6b4b4a 6a0f76d 07efbbd 6a0f76d 07efbbd 6a0f76d a6b4b4a 07efbbd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 |
import streamlit as st
import cv2
import numpy as np
import os
import sqlite3
from PIL import Image
from keras.models import load_model
from huggingface_hub import HfApi
import tempfile
# Constants
KNOWN_FACES_DIR = "known_faces"  # local directory holding registered face images
DATABASE = "students.db"  # SQLite database file for student records
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"  # pre-trained Keras CNN (loaded below)
# Index order must match the emotion model's output layer — do not reorder.
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
REPO_NAME = "face_and_emotion_detection"
REPO_ID = "LovnishVerma/" + REPO_NAME  # Hugging Face Space receiving image uploads
IMG_SHAPE = 48  # square input size fed to the emotion model (48x48)
# NOTE: the token is read from an environment variable literally named "upload".
hf_token = os.getenv("upload")
# Ensure the Hugging Face token is available
if not hf_token:
    st.error("Hugging Face token not found. Please set the environment variable.")
    st.stop()
# Initialize Hugging Face API
api = HfApi()
# Create Hugging Face repository
def create_hugging_face_repo():
    """Create (or reuse) the Streamlit Space on Hugging Face for this app.

    Reports the outcome through the Streamlit UI; never raises to the caller.
    """
    try:
        api.create_repo(
            repo_id=REPO_ID,
            repo_type="space",
            space_sdk="streamlit",
            token=hf_token,
            exist_ok=True,
        )
        st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
    except Exception as e:
        st.error(f"Error creating Hugging Face repository: {e}")
# Load the emotion model once, using caching
@st.cache_resource
def load_emotion_model():
    """Load the pre-trained CNN emotion classifier from disk.

    Cached by Streamlit so the model is loaded only once per process.
    Halts the app (st.stop) if the model file cannot be loaded.
    """
    try:
        return load_model(EMOTION_MODEL_FILE)
    except Exception as e:
        st.error(f"Error loading emotion model: {e}")
        st.stop()
# Load the emotion classifier once at import time (Streamlit caches it).
emotion_model = load_emotion_model()
# Initialize the face recognizer
# LBPH (Local Binary Patterns Histograms) recognizer; trained in load_known_faces().
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Database functions
def initialize_database():
    """
    Create the 'students' table in the SQLite database if it does not exist.

    Columns: auto-increment id, name, UNIQUE roll number, image path, and a
    timestamp defaulting to the insertion time.
    """
    schema = """
        CREATE TABLE IF NOT EXISTS students (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            roll_no TEXT NOT NULL UNIQUE,
            image_path TEXT NOT NULL,
            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
        )
    """
    with sqlite3.connect(DATABASE) as conn:
        conn.execute(schema)
        conn.commit()
# Initialize the database
# Runs at import time so the table exists before any insert.
initialize_database()
def save_to_database(name, roll_no, image_path):
    """
    Insert one student record (name, roll number, image path) into SQLite.

    Roll numbers are UNIQUE in the schema; a duplicate insert is reported
    to the user via the UI instead of raising.
    """
    insert_sql = """
                INSERT INTO students (name, roll_no, image_path)
                VALUES (?, ?, ?)
            """
    with sqlite3.connect(DATABASE) as conn:
        try:
            conn.execute(insert_sql, (name, roll_no, image_path))
            conn.commit()
            st.success("Data saved successfully!")
        except sqlite3.IntegrityError:
            st.error("Roll number already exists!")
def save_image_to_hugging_face(image, name, roll_no):
    """
    Save the captured image under KNOWN_FACES_DIR and upload it to the Space.

    Args:
        image: PIL.Image to persist.
        name: Student name (used in the filename).
        roll_no: Student roll number (used in the filename).

    Returns:
        str: Local path of the saved image. Returned even when the upload
        fails — the local copy is still usable for recognizer training.
    """
    # Idiom: makedirs with exist_ok replaces the exists()/makedirs() pair.
    os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
    filename = f"{name}_{roll_no}.jpg"
    local_path = os.path.join(KNOWN_FACES_DIR, filename)
    # Saving the image to the correct directory
    image.save(local_path)
    try:
        api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=filename,
            repo_id=REPO_ID,
            repo_type="space",
            token=hf_token
        )
        # Bug fix: the success message was an f-string with no placeholder;
        # report the file that was actually uploaded.
        st.success(f"Image uploaded to Hugging Face: {filename}")
    except Exception as e:
        st.error(f"Error uploading image to Hugging Face: {e}")
    return local_path
# Load known faces
def load_known_faces():
    """
    Load face images from KNOWN_FACES_DIR and train the LBPH recognizer.

    Every detected face region gets a sequential integer label; the returned
    name list is index-aligned with those labels. The file stem is assumed
    to be the person's identifier (e.g. 'Alice_101.jpg' -> 'Alice_101').

    Returns:
        list[str]: Names aligned with the recognizer's labels (may be empty).
    """
    known_faces = []
    known_names = []
    # Robustness: on a fresh deployment the directory may not exist yet;
    # os.listdir() would raise FileNotFoundError.
    os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
    # Hoist the cascade construction out of the per-image loop (loop-invariant).
    detector = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    )
    for image_name in os.listdir(KNOWN_FACES_DIR):
        if not image_name.endswith(('.jpg', '.jpeg', '.png')):
            continue
        image_path = os.path.join(KNOWN_FACES_DIR, image_name)
        image = cv2.imread(image_path)
        if image is None:
            # Robustness: unreadable/corrupt file — skip instead of crashing
            # in cvtColor (cv2.imread returns None on failure, no exception).
            continue
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = detector.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
        )
        for (x, y, w, h) in faces:
            known_faces.append(gray[y:y+h, x:x+w])
            known_names.append(image_name.split('.')[0])  # file stem = person's name
    if known_faces:
        # Labels are simply 0..n-1, matching known_names indices.
        face_recognizer.train(known_faces, np.array(range(len(known_faces))))
    else:
        st.warning("No known faces found for training.")
    return known_names
# Load known faces at the start
# Trains the recognizer at import time; known_names is index-aligned with its labels.
known_names = load_known_faces()
# Process frame for both emotion detection and face recognition
def process_frame(frame):
    """
    Annotate one BGR frame with face identity and predicted emotion.

    For every detected face: predict the emotion with the CNN, identify the
    person with the LBPH recognizer (distance < 100 counts as a match), and
    draw a box plus label onto the frame in place.

    Returns:
        tuple: (annotated frame, text for the last face; "" if no face).
    """
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    )
    faces = detector.detectMultiScale(
        gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )
    result_text = ""
    for (x, y, w, h) in faces:
        face_gray = gray_frame[y:y+h, x:x+w]
        face_color = frame[y:y+h, x:x+w]
        # Emotion input: 48x48 RGB, scaled to [0, 1], with a batch dimension.
        model_input = cv2.cvtColor(
            cv2.resize(face_color, (IMG_SHAPE, IMG_SHAPE)), cv2.COLOR_BGR2RGB
        )
        model_input = np.expand_dims(model_input, axis=0) / 255.0
        scores = emotion_model.predict(model_input)
        emotion = EMOTION_LABELS[np.argmax(scores[0])]
        # Identity: lower LBPH distance means a better match.
        label, confidence = face_recognizer.predict(face_gray)
        name = known_names[label] if confidence < 100 else "Unknown"
        result_text = f"{name} is feeling {emotion}"
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return frame, result_text
# Video feed handler
def video_feed(video_source):
    """
    Stream frames from an OpenCV capture into the Streamlit page.

    Loops until the source stops yielding frames, updating an image
    placeholder and a result-text placeholder in place.
    """
    frame_slot = st.empty()
    text_slot = st.empty()
    while True:
        ok, frame = video_source.read()
        if not ok:
            break
        annotated, result_text = process_frame(frame)
        frame_slot.image(annotated, channels="BGR", use_column_width=True)
        text_slot.markdown(
            f"<h3 style='text-align: center;'>{result_text}</h3>",
            unsafe_allow_html=True,
        )
# Streamlit interface
def _register_student():
    """Collect name, roll number, and an image; persist and upload them."""
    name = st.text_input("Enter your name")
    roll_no = st.text_input("Enter your roll number")
    capture_mode = st.radio("Choose an option to upload your image", ["Use Webcam", "Upload File"])
    if capture_mode == "Use Webcam":
        picture = st.camera_input("Take a picture")
    else:
        picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if st.button("Register"):
        if not name or not roll_no:
            st.error("Please fill in both name and roll number.")
        elif not picture:
            st.error("Please upload or capture an image.")
        else:
            try:
                image = Image.open(picture)
                image_path = save_image_to_hugging_face(image, name, roll_no)
                save_to_database(name, roll_no, image_path)
            except Exception as e:
                st.error(f"An error occurred: {e}")

def _show_processed_image(frame):
    """Run detection on one frame and render the annotated result."""
    frame, result_text = process_frame(frame)
    st.image(frame, caption='Processed Image', use_column_width=True)
    st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)

def _recognition_ui():
    """Face/emotion recognition from camera, image upload, or video upload."""
    upload_choice = st.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
    if upload_choice == "Camera":
        image = st.camera_input("Take a picture")
        if image:
            _show_processed_image(np.array(Image.open(image)))
    elif upload_choice == "Upload Image":
        uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
        if uploaded_image:
            _show_processed_image(np.array(Image.open(uploaded_image)))
    elif upload_choice == "Upload Video":
        video_file = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])
        if video_file:
            temp_video_file = tempfile.NamedTemporaryFile(delete=False)
            try:
                temp_video_file.write(video_file.read())
                temp_video_file.close()
                video_source = cv2.VideoCapture(temp_video_file.name)
                try:
                    video_feed(video_source)
                finally:
                    # Bug fix: the capture was never released.
                    video_source.release()
            finally:
                # Bug fix: delete=False temp files were leaked on disk.
                os.unlink(temp_video_file.name)

# Streamlit interface
def main():
    """Streamlit entry point: student registration and recognition UI."""
    st.title("Student Registration with Face Recognition and Emotion Detection")
    # Step 1: Student Registration
    registration_mode = st.sidebar.radio("Choose an option", ["Register Student", "Face and Emotion Recognition"])
    if registration_mode == "Register Student":
        _register_student()
    elif registration_mode == "Face and Emotion Recognition":
        _recognition_ui()

if __name__ == "__main__":
    main()
|