File size: 5,312 Bytes
56a50b1
46acd56
bb6896d
46acd56
 
8fa939c
bb6896d
46acd56
bb6896d
 
 
46acd56
 
bb6896d
 
 
46acd56
bb6896d
 
 
46acd56
bb6896d
 
46acd56
 
 
 
 
 
a34751a
46acd56
 
 
 
 
 
a34751a
46acd56
 
 
 
 
 
 
077c41e
46acd56
 
 
 
 
 
 
 
 
 
bb6896d
46acd56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb6896d
a34751a
46acd56
bb6896d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8fa939c
bb6896d
 
 
 
 
 
46acd56
 
 
 
 
 
 
 
 
 
 
 
 
 
8fa939c
bb6896d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import streamlit as st
import cv2
import os
import numpy as np
from keras.models import load_model
from PIL import Image
from huggingface_hub import HfApi
from datetime import datetime

# Constants
KNOWN_FACES_DIR = "known_faces"  # Directory to save user images
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"  # Keras CNN trained on 48x48 face crops
# Index order must match the model's output layer (argmax index -> label).
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
REPO_NAME = "face_and_emotion_detection"
REPO_ID = f"LovnishVerma/{REPO_NAME}"

# Ensure the directories exist
os.makedirs(KNOWN_FACES_DIR, exist_ok=True)

# Initialize Hugging Face API
# NOTE(review): token is read from the env var literally named "upload";
# neither hf_token nor api is used elsewhere in this file — confirm intent.
hf_token = os.getenv("upload")  # Replace with your actual Hugging Face token
api = HfApi()

# Load emotion detection model
# Fail fast: the whole app depends on the model, so stop the Streamlit
# script entirely if it cannot be loaded.
try:
    emotion_model = load_model(EMOTION_MODEL_FILE)
except Exception as e:
    st.error(f"Error loading emotion model: {e}")
    st.stop()

# Face and Emotion Detection Function
def detect_faces_and_emotions(image):
    """Detect faces in a BGR image and classify the emotion of the last face.

    Args:
        image: BGR image as a numpy array (OpenCV convention).

    Returns:
        Tuple ``(faces, emotion)`` where ``faces`` is the raw
        ``detectMultiScale`` result (sequence of (x, y, w, h) boxes, empty
        when none found) and ``emotion`` is the predicted label string for
        the last detected face, or ``None`` when no face was detected.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)

    emotion_label = None
    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        resized_face = cv2.resize(face, (48, 48))  # model expects 48x48 input
        rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
        normalized_face = rgb_face / 255.0  # scale pixels to [0, 1]
        reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))  # add batch dim

        # Predict the emotion; if several faces exist, the last one wins.
        emotion_prediction = emotion_model.predict(reshaped_face)
        emotion_label = np.argmax(emotion_prediction)

    # BUG FIX: the original used `if emotion_label`, which treats class
    # index 0 ("Angry") as falsy and wrongly returned None for it.
    return faces, EMOTION_LABELS[emotion_label] if emotion_label is not None else None

# Face Recognition Function
def recognize_face(image):
    """Recognize the face in the uploaded image by comparing with known faces.

    Trains an LBPH recognizer on the fly from every ``.jpg`` in
    ``KNOWN_FACES_DIR`` (one integer label per file, filename stem used as
    the person's name), then predicts on each face found in ``image``.

    Args:
        image: BGR image as a numpy array (OpenCV convention).

    Returns:
        The matched filename stem, or ``"Unknown"`` when there are no
        enrolled faces or no prediction passes the confidence threshold.
    """
    # Hoisted: build the cascade once instead of once per enrolled file.
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    known_faces = []
    labels = []

    # Load known faces from the directory
    for filename in os.listdir(KNOWN_FACES_DIR):
        if filename.endswith(".jpg"):
            image_path = os.path.join(KNOWN_FACES_DIR, filename)
            known_image = cv2.imread(image_path)
            if known_image is None:
                continue  # skip unreadable/corrupt files instead of crashing
            gray_known = cv2.cvtColor(known_image, cv2.COLOR_BGR2GRAY)
            enrolled = face_cascade.detectMultiScale(gray_known, scaleFactor=1.3, minNeighbors=5)
            for (x, y, w, h) in enrolled:
                known_faces.append(gray_known[y:y+h, x:x+w])
                labels.append(filename.split(".")[0])  # Use image name as label

    # BUG FIX: the original called recognizer.predict() even when nothing
    # was enrolled; predicting with an untrained LBPH model raises cv2.error.
    if not known_faces:
        return "Unknown"

    recognizer = cv2.face.LBPHFaceRecognizer_create()
    # Each enrolled face gets its own integer label (its index into `labels`).
    recognizer.train(known_faces, np.array(range(len(labels))))

    # Detect faces in the uploaded image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)

    recognized_name = "Unknown"
    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        label, confidence = recognizer.predict(face)
        # Lower LBPH confidence means a better match; 100 is the cutoff.
        if confidence < 100:  # Confidence threshold
            recognized_name = labels[label]  # Get the name from labels
    
    return recognized_name

# Streamlit UI
st.title("Student Registration with Face Recognition and Emotion Detection")

# Input fields for student details
name = st.text_input("Enter your name")
roll_no = st.text_input("Enter your roll number")

# Choose input method for the image (webcam or file upload)
capture_mode = st.radio("Choose an option to upload your image", ["Use Webcam", "Upload File"])

# Handle webcam capture or file upload
if capture_mode == "Use Webcam":
    picture = st.camera_input("Take a picture")  # Capture image using webcam
elif capture_mode == "Upload File":
    picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])  # Upload image from file system

# Save data and process image on button click
if st.button("Register"):
    if not name or not roll_no:
        st.error("Please fill in both name and roll number.")
    elif not picture:
        st.error("Please upload or capture an image.")
    else:
        try:
            # Both camera_input and file_uploader yield file-like objects,
            # so a single Image.open handles either capture mode.
            image = Image.open(picture)

            # Convert the image to numpy array for processing
            img_array = np.array(image)

            # Detect faces and emotions
            faces, emotion_label = detect_faces_and_emotions(img_array)
            # BUG FIX: `if faces:` on a numpy array of 2+ detections raises
            # "truth value of an array is ambiguous"; len() is safe for both
            # the empty tuple and the ndarray detectMultiScale can return.
            if len(faces) > 0:
                st.success(f"Emotion Detected: {emotion_label}")
            else:
                st.warning("No face detected.")

            # Perform face recognition
            recognized_name = recognize_face(img_array)
            st.success(f"Face Recognized as: {recognized_name}")
        
        except Exception as e:
            st.error(f"An error occurred: {e}")