import os
import sqlite3
import tempfile

import cv2
import numpy as np
import streamlit as st
from PIL import Image
from keras.models import load_model
from huggingface_hub import HfApi

# Constants
KNOWN_FACES_DIR = "known_faces"
DATABASE = "students.db"
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
REPO_NAME = "face_and_emotion_detection"
REPO_ID = "LovnishVerma/" + REPO_NAME
IMG_SHAPE = 48

hf_token = os.getenv("upload")

# Ensure the Hugging Face token is available
if not hf_token:
    st.error("Hugging Face token not found. Please set the environment variable.")
    st.stop()

# Initialize Hugging Face API
api = HfApi()


# Create Hugging Face repository
def create_hugging_face_repo():
    try:
        api.create_repo(
            repo_id=REPO_ID,
            repo_type="space",
            space_sdk="streamlit",
            token=hf_token,
            exist_ok=True,
        )
        st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
    except Exception as e:
        st.error(f"Error creating Hugging Face repository: {e}")


# Load the emotion model once, using caching
@st.cache_resource
def load_emotion_model():
    try:
        return load_model(EMOTION_MODEL_FILE)
    except Exception as e:
        st.error(f"Error loading emotion model: {e}")
        st.stop()


emotion_model = load_emotion_model()

# Initialize the face recognizer (requires opencv-contrib-python)
face_recognizer = cv2.face.LBPHFaceRecognizer_create()

# Load the Haar cascade once at module level instead of once per call/frame
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)


# Database functions
def initialize_database():
    """
    Initializes the SQLite database by creating a table to store student data.
    """
    with sqlite3.connect(DATABASE) as conn:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS students (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                roll_no TEXT NOT NULL UNIQUE,
                image_path TEXT NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.commit()


# Initialize the database
initialize_database()


def save_to_database(name, roll_no, image_path):
    """
    Saves student data (name, roll number, image path) to the SQLite database.
    Ensures roll number is unique.
    """
    with sqlite3.connect(DATABASE) as conn:
        try:
            conn.execute(
                "INSERT INTO students (name, roll_no, image_path) VALUES (?, ?, ?)",
                (name, roll_no, image_path),
            )
            conn.commit()
            st.success("Data saved successfully!")
        except sqlite3.IntegrityError:
            st.error("Roll number already exists!")


def save_image_to_hugging_face(image, name, roll_no):
    """
    Saves the captured image locally in the 'known_faces' directory
    and uploads it to Hugging Face.
    """
    os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
    filename = f"{name}_{roll_no}.jpg"
    local_path = os.path.join(KNOWN_FACES_DIR, filename)

    # Saving the image to the correct directory
    image.save(local_path)

    try:
        api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=filename,
            repo_id=REPO_ID,
            repo_type="space",
            token=hf_token,
        )
        st.success(f"Image uploaded to Hugging Face: {filename}")
    except Exception as e:
        st.error(f"Error uploading image to Hugging Face: {e}")

    return local_path


# Load known faces
def load_known_faces():
    """
    Loads known faces from the 'known_faces' directory and trains the recognizer.
""" known_faces = [] known_names = [] for image_name in os.listdir(KNOWN_FACES_DIR): if image_name.endswith(('.jpg', '.jpeg', '.png')): image_path = os.path.join(KNOWN_FACES_DIR, image_name) image = cv2.imread(image_path) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30) ) for (x, y, w, h) in faces: roi_gray = gray[y:y+h, x:x+w] known_faces.append(roi_gray) known_names.append(image_name.split('.')[0]) # Assuming file name is the person's name if known_faces: face_recognizer.train(known_faces, np.array([i for i in range(len(known_faces))])) else: st.warning("No known faces found for training.") return known_names # Load known faces at the start known_names = load_known_faces() # Process frame for both emotion detection and face recognition def process_frame(frame): gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale( gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30) ) result_text = "" for (x, y, w, h) in faces: roi_gray = gray_frame[y:y+h, x:x+w] roi_color = frame[y:y+h, x:x+w] face_roi = cv2.resize(roi_color, (IMG_SHAPE, IMG_SHAPE)) face_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB) face_roi = np.expand_dims(face_roi, axis=0) / 255.0 predictions = emotion_model.predict(face_roi) emotion = EMOTION_LABELS[np.argmax(predictions[0])] label, confidence = face_recognizer.predict(roi_gray) name = "Unknown" if confidence < 100: name = known_names[label] result_text = f"{name} is feeling {emotion}" cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) return frame, result_text # Video feed handler def video_feed(video_source): frame_placeholder = st.empty() text_placeholder = st.empty() while True: ret, frame = video_source.read() if not ret: break frame, result_text = process_frame(frame) frame_placeholder.image(frame, channels="BGR", use_column_width=True) text_placeholder.markdown(f"