import streamlit as st
import cv2
import os
import numpy as np
from keras.models import load_model
from PIL import Image
import sqlite3
from datetime import datetime
# Constants
ROOT_DIR = os.getcwd() # Root directory of the project
DATABASE = "students.db" # SQLite database file to store student information
EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"
EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
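# NOTE: CNN_Model_acc_75.h5 is assumed here to take 48x48 RGB input and to output one
# score per entry in EMOTION_LABELS, in this order; adjust both if your model differs.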
# Load the emotion detection model
try:
    emotion_model = load_model(EMOTION_MODEL_FILE)
except Exception as e:
    st.error(f"Error loading emotion model: {e}")
    st.stop()
# Database Functions
def initialize_database():
    """ Initializes the SQLite database by creating the students table if it doesn't exist. """
    conn = sqlite3.connect(DATABASE)
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS students (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            roll_no TEXT NOT NULL UNIQUE,
            image_path TEXT NOT NULL,
            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
        )
    """)
    conn.commit()
    conn.close()
def save_to_database(name, roll_no, image_path):
    """ Saves the student's data to the database. """
    conn = sqlite3.connect(DATABASE)
    cursor = conn.cursor()
    try:
        cursor.execute("""
            INSERT INTO students (name, roll_no, image_path)
            VALUES (?, ?, ?)
        """, (name, roll_no, image_path))
        conn.commit()
        st.success("Data saved successfully!")
    except sqlite3.IntegrityError:
        st.error("Roll number already exists!")
    finally:
        conn.close()
def save_image_to_root_directory(image, name, roll_no):
    """ Saves the image in the root directory and returns its path, or None on failure. """
    # Construct the local file path
    filename = f"{name}_{roll_no}.jpg"
    local_path = os.path.join(ROOT_DIR, filename)
    try:
        # Convert image to RGB if necessary (JPEG cannot store an alpha channel)
        if image.mode != "RGB":
            image = image.convert("RGB")
        # Save the image to the root directory
        image.save(local_path)
        st.success(f"Image saved to {local_path}.")
        return local_path
    except Exception as e:
        st.error(f"Error saving image: {e}")
        return None
# Initialize the database when the app starts
initialize_database()
# Streamlit user interface (UI)
st.title("Student Registration with Image Upload and Face Recognition")
# Input fields for student details
name = st.text_input("Enter your name")
roll_no = st.text_input("Enter your roll number")
# Choose input method for the image (webcam or file upload)
capture_mode = st.radio("Choose an option to upload your image", ["Use Webcam", "Upload File"])
# Handle webcam capture or file upload
if capture_mode == "Use Webcam":
    picture = st.camera_input("Take a picture")  # Capture image using webcam
elif capture_mode == "Upload File":
    picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])  # Upload image from file system
# Save data and process image on button click
if st.button("Register"):
    if not name or not roll_no:
        st.error("Please fill in both name and roll number.")
    elif not picture:
        st.error("Please upload or capture an image.")
    else:
        try:
            # Both the webcam capture and the uploaded file are file-like objects
            image = Image.open(picture)
            # Save the image locally in the root directory, then record the student
            image_path = save_image_to_root_directory(image, name, roll_no)
            if image_path:
                save_to_database(name, roll_no, image_path)
        except Exception as e:
            st.error(f"An error occurred: {e}")
# Face and Emotion Detection Function
def detect_faces_and_emotions(image):
    """ Detects the first face in an RGB image array and returns its predicted emotion label. """
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        face = gray_image[y:y+h, x:x+w]
        resized_face = cv2.resize(face, (48, 48))  # Resize face to the model's 48x48 input
        rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
        normalized_face = rgb_face / 255.0
        reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))
        # Predict the emotion for the first detected face
        emotion_prediction = emotion_model.predict(reshaped_face)
        emotion_label = np.argmax(emotion_prediction)
        return EMOTION_LABELS[emotion_label]
    return None
# Face Recognition: Compare the query face with all registered images in the root directory
def recognize_face(image_array):
    """ Compares faces in the given RGB image array with the registered images in the root directory. """
    gray_image = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)
    recognized_name = None
    for (x, y, w, h) in faces:
        # Preprocess the query face the same way as the stored faces so the arrays are comparable
        face = cv2.resize(gray_image[y:y+h, x:x+w], (48, 48)) / 255.0
        for filename in os.listdir(ROOT_DIR):
            if filename.endswith(('.jpg', '.jpeg', '.png')):
                stored_image = cv2.imread(os.path.join(ROOT_DIR, filename))
                if stored_image is None:
                    continue
                stored_gray = cv2.cvtColor(stored_image, cv2.COLOR_BGR2GRAY)
                stored_faces = face_cascade.detectMultiScale(stored_gray)
                for (sx, sy, sw, sh) in stored_faces:
                    stored_face = cv2.resize(stored_gray[sy:sy+sh, sx:sx+sw], (48, 48)) / 255.0
                    # Compare the faces (a more advanced method would use facial embeddings;
                    # for simplicity this is a basic pixel-level comparison)
                    if np.allclose(stored_face, face, atol=0.1):
                        recognized_name = filename.split('_')[0]  # Extract the name from the file name
                        break
                if recognized_name:
                    break
        if recognized_name:
            break
    return recognized_name
# UI for Emotion and Face Detection
if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection"]) == "Face Recognition and Emotion Detection":
    st.subheader("Recognize Faces and Detect Emotions")
    action = st.radio("Choose Action", ["Upload Image", "Use Webcam"])
    if action == "Upload Image":
        uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
        if uploaded_file:
            img = Image.open(uploaded_file).convert("RGB")
            img_array = np.array(img)
            emotion_label = detect_faces_and_emotions(img_array)
            recognized_name = recognize_face(img_array)
            if emotion_label:
                st.success(f"Emotion Detected: {emotion_label}")
            if recognized_name:
                st.success(f"Face Recognized: {recognized_name}")
            else:
                st.warning("No matching face found.")
    elif action == "Use Webcam":
        st.info("Use the camera input widget to capture an image.")
        # A unique key avoids clashing with the registration camera widget
        camera_image = st.camera_input("Take a picture", key="emotion_camera")
        if camera_image:
            img = Image.open(camera_image).convert("RGB")
            img_array = np.array(img)
            emotion_label = detect_faces_and_emotions(img_array)
            recognized_name = recognize_face(img_array)
            if emotion_label:
                st.success(f"Emotion Detected: {emotion_label}")
            if recognized_name:
                st.success(f"Face Recognized: {recognized_name}")
            else:
                st.warning("No matching face found.")