import streamlit as st
import cv2
import os
import numpy as np
from keras.models import load_model
from PIL import Image
from huggingface_hub import HfApi
from datetime import datetime
# Constants
KNOWN_FACES_DIR = "known_faces" # Directory to save user images
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
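# NOTE: this label order is assumed to match the output indices of CNN_Model_acc_75.h5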
REPO_NAME = "face_and_emotion_detection"
REPO_ID = f"LovnishVerma/{REPO_NAME}"
# Ensure the directories exist
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
# Initialize Hugging Face API
hf_token = os.getenv("upload")  # Hugging Face token read from the "upload" environment variable / Space secret
api = HfApi()
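# NOTE: hf_token and api are initialized here (presumably for uploading registered
# images to the repo above) but are not referenced later in this script.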
# Load emotion detection model
try:
    emotion_model = load_model(EMOTION_MODEL_FILE)
except Exception as e:
    st.error(f"Error loading emotion model: {e}")
    st.stop()
# Face and Emotion Detection Function
def detect_faces_and_emotions(image):
    """Detect faces in the image and predict the emotion of the last detected face."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)

    emotion_label = None
    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        resized_face = cv2.resize(face, (48, 48))  # Resize face to the 48x48 input expected by the model
        rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
        normalized_face = rgb_face / 255.0
        reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))

        # Predict the emotion
        emotion_prediction = emotion_model.predict(reshaped_face)
        emotion_label = np.argmax(emotion_prediction)

    # Compare against None explicitly: index 0 ("Angry") is falsy but still a valid prediction
    return faces, EMOTION_LABELS[emotion_label] if emotion_label is not None else None
# Face Recognition Function
def recognize_face(image):
    """Recognize the face in the uploaded image by comparing it with known faces."""
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    known_faces = []
    labels = []

    # Load known faces from the directory
    for filename in os.listdir(KNOWN_FACES_DIR):
        if filename.endswith(".jpg"):
            image_path = os.path.join(KNOWN_FACES_DIR, filename)
            known_image = cv2.imread(image_path)
            gray_known = cv2.cvtColor(known_image, cv2.COLOR_BGR2GRAY)
            known_face_rects = face_cascade.detectMultiScale(gray_known, scaleFactor=1.3, minNeighbors=5)
            for (x, y, w, h) in known_face_rects:
                face = gray_known[y:y+h, x:x+w]
                known_faces.append(face)
                labels.append(filename.split(".")[0])  # Use the image name as the label

    if not known_faces:
        # Nothing to compare against; predicting with an untrained recognizer would raise an error
        return "Unknown"

    # Train the recognizer; each face gets its index as its numeric label
    recognizer.train(known_faces, np.array(range(len(labels)), dtype=np.int32))

    # Detect faces in the uploaded image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)

    recognized_name = "Unknown"
    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        label, confidence = recognizer.predict(face)
        if confidence < 100:  # LBPH confidence is a distance: lower means a closer match
            recognized_name = labels[label]  # Map the predicted index back to the stored name
    return recognized_name
# Streamlit UI
st.title("Student Registration with Face Recognition and Emotion Detection")
# Input fields for student details
name = st.text_input("Enter your name")
roll_no = st.text_input("Enter your roll number")
# Choose input method for the image (webcam or file upload)
capture_mode = st.radio("Choose an option to upload your image", ["Use Webcam", "Upload File"])
# Handle webcam capture or file upload
if capture_mode == "Use Webcam":
    picture = st.camera_input("Take a picture")  # Capture an image with the webcam
elif capture_mode == "Upload File":
    picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])  # Upload an image from the file system
# Save data and process image on button click
if st.button("Register"):
    if not name or not roll_no:
        st.error("Please fill in both name and roll number.")
    elif not picture:
        st.error("Please upload or capture an image.")
    else:
        try:
            # Open the captured or uploaded image and force three channels
            # so the colour conversions below always succeed
            image = Image.open(picture).convert("RGB")

            # Convert the image to a numpy array for processing
            img_array = np.array(image)

            # Detect faces and emotions
            faces, emotion_label = detect_faces_and_emotions(img_array)
            if len(faces) > 0:
                st.success(f"Emotion Detected: {emotion_label}")
            else:
                st.warning("No face detected.")

            # Perform face recognition
            recognized_name = recognize_face(img_array)
            st.success(f"Face Recognized as: {recognized_name}")
        except Exception as e:
            st.error(f"An error occurred: {e}")