import streamlit as st
import cv2
import numpy as np
import time
import os
from keras.models import load_model
from PIL import Image
import tempfile
# Larger title
st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
# Smaller subtitle
st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
start = time.time()
# Load the emotion model
@st.cache_resource
def load_emotion_model():
    model = load_model('CNN_Model_acc_75.h5')  # Ensure this file is in your Space
    return model
model = load_emotion_model()
print("time taken to load model: ", time.time() - start)
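# st.cache_resource keeps the loaded Keras model cached across Streamlit
# reruns, so the load cost above is paid once per process rather than on
# every widget interaction.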
# Emotion labels
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# Load known faces (one reference image per person in a folder) and train a
# single shared LBPH recognizer on them
known_names = []
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
def load_known_faces():
    folder_path = "known_faces"  # Place your folder with known faces here
    training_images = []
    training_labels = []
    for image_name in os.listdir(folder_path):
        if image_name.lower().endswith(('.jpg', '.jpeg', '.png')):
            image_path = os.path.join(folder_path, image_name)
            image = cv2.imread(image_path)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            training_images.append(gray)
            training_labels.append(len(known_names))  # one integer label per image
            known_names.append(image_name.split('.')[0])  # Assuming the file name is the person's name
    if training_images:
        # This is simplified: ideally train with multiple images per person
        face_recognizer.train(training_images, np.array(training_labels))
load_known_faces()
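# Note: LBPH accuracy improves with several training images per person. A
# minimal sketch of that variant (assumption: one known_faces/<person>/
# subfolder per person instead of the flat layout used above):
#
#     def load_known_faces_multi(root="known_faces"):
#         images, labels = [], []
#         for label, person in enumerate(sorted(os.listdir(root))):
#             person_dir = os.path.join(root, person)
#             if not os.path.isdir(person_dir):
#                 continue
#             known_names.append(person)
#             for fname in os.listdir(person_dir):
#                 img = cv2.imread(os.path.join(person_dir, fname))
#                 if img is not None:
#                     images.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
#                     labels.append(label)
#         face_recognizer.train(images, np.array(labels))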
# Face detection using OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
img_shape = 48
def process_frame(frame):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    for (x, y, w, h) in faces:
        roi_gray = gray_frame[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        face_roi = cv2.resize(roi_color, (img_shape, img_shape))
        face_roi = np.expand_dims(face_roi, axis=0)
        face_roi = face_roi / 255.0  # normalize pixel values to [0, 1]
        # Emotion detection
        predictions = model.predict(face_roi)
        emotion = emotion_labels[np.argmax(predictions[0])]
        # Face recognition using the LBPH recognizer trained in load_known_faces()
        name = "Unknown"
        if known_names:
            label, confidence = face_recognizer.predict(roi_gray)
            if confidence < 100:  # lower LBPH distance means a closer match
                name = known_names[label]
        # Draw bounding box and label on the frame
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, f"{name} - {emotion}", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return frame
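# Assumption: the CNN expects 48x48 RGB input scaled to [0, 1]. If
# CNN_Model_acc_75.h5 was trained on grayscale crops instead, feed it the
# gray ROI with an explicit channel axis, e.g.:
#
#     face_roi = cv2.resize(roi_gray, (img_shape, img_shape))
#     face_roi = face_roi.reshape(1, img_shape, img_shape, 1) / 255.0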
# Video feed
def video_feed(video_source):
    frame_placeholder = st.empty()  # This placeholder is updated in-place with each frame
    while True:
        ret, frame = video_source.read()
        if not ret:
            break
        frame = process_frame(frame)
        # Display the processed frame in the placeholder
        frame_placeholder.image(frame, channels="BGR", use_column_width=True)
    video_source.release()  # free the capture device/file handle when the stream ends
# Sidebar for video or image upload
upload_choice = st.sidebar.radio("Choose input source", ["Upload Video", "Upload Image", "Camera"])
if upload_choice == "Camera":
    # Access the default camera
    video_source = cv2.VideoCapture(0)
    video_feed(video_source)
elif upload_choice == "Upload Video":
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
    if uploaded_video:
        # Temporarily save the video to disk so OpenCV can open it by path
        with tempfile.NamedTemporaryFile(delete=False) as tfile:
            tfile.write(uploaded_video.read())
        video_source = cv2.VideoCapture(tfile.name)
        video_feed(video_source)
        os.unlink(tfile.name)  # remove the temporary file once playback ends
elif upload_choice == "Upload Image":
    uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
    if uploaded_image:
        image = Image.open(uploaded_image).convert("RGB")  # convert handles palette/GIF inputs
        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)  # PIL is RGB; process_frame expects BGR
        frame = process_frame(frame)
        st.image(frame, channels="BGR", caption='Processed Image', use_column_width=True)
st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
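# To run locally (assumption: this file is saved as app.py, with the model
# file and a known_faces/ folder sitting next to it):
#     streamlit run app.py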