# Streamlit app: face detection + CNN emotion classification.
import streamlit as st
import cv2
import numpy as np
import time
from keras.models import load_model
from PIL import Image
from huggingface_hub import HfApi, Repository
import os
import tempfile
# Page configuration
st.set_page_config(page_title="Emotion Detection", layout="centered")

# Centered title plus the list of emotions the model can detect.
_title_html = "<h1 style='text-align: center;'>Emotion Detection</h1>"
_subtitle_html = "<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>"
st.markdown(_title_html, unsafe_allow_html=True)
st.markdown(_subtitle_html, unsafe_allow_html=True)
# Load Model
@st.cache_resource
def load_emotion_model():
    """Load the pre-trained CNN emotion classifier (cached across Streamlit reruns)."""
    return load_model('CNN_Model_acc_75.h5')


# Report how long the (first) model load takes in the UI.
start_time = time.time()
model = load_emotion_model()
st.write(f"Model loaded in {time.time() - start_time:.2f} seconds.")
# Emotion labels and constants
# Class names in the order of the CNN's output layer (index -> label).
emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# Side length (pixels) the face crop is resized to before prediction.
img_shape = 48
# Haar cascade bundled with OpenCV for frontal-face detection.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
def process_frame(frame):
    """Detect faces in a frame and annotate each with its predicted emotion.

    Args:
        frame: numpy image array (BGR order when coming from OpenCV).

    Returns:
        The same frame with a green box and emotion label drawn on every
        detected face (mutated in place and returned).
    """
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    for (x, y, w, h) in faces:
        roi_color = frame[y:y+h, x:x+w]
        # Resize the face crop to the model's input size and add a batch axis.
        face_roi = cv2.resize(roi_color, (img_shape, img_shape))
        face_roi = np.expand_dims(face_roi, axis=0)
        # Bug fix: normalize pixel intensities to [0, 1]. The original divided
        # by img_shape (48), which is the image side length, not the 8-bit
        # pixel range — 255 is the correct divisor for standard scaling.
        face_roi = face_roi / 255.0
        predictions = model.predict(face_roi)
        emotion = emotion_labels[np.argmax(predictions[0])]
        # Draw the bounding box and the label just below it.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, emotion, (x, y + h + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    return frame
# Sidebar for input selection
st.sidebar.title("Choose Input Source")
_input_modes = ["Camera", "Upload Video", "Upload Image", "Upload to Hugging Face"]
upload_choice = st.sidebar.radio("Select:", _input_modes)
if upload_choice == "Camera":
    # Streamlit's camera widget returns a single snapshot (or None).
    st.sidebar.info("Click a picture to analyze emotion.")
    picture = st.camera_input("Take a picture")
    if picture is not None:
        snapshot = np.array(Image.open(picture))
        annotated = process_frame(snapshot)
        st.image(annotated, caption="Processed Image", use_column_width=True)
elif upload_choice == "Upload Video":
uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
if uploaded_video:
with tempfile.NamedTemporaryFile(delete=False) as tfile:
tfile.write(uploaded_video.read())
video_source = cv2.VideoCapture(tfile.name)
frame_placeholder = st.empty()
while video_source.isOpened():
ret, frame = video_source.read()
if not ret:
break
frame = process_frame(frame)
frame_placeholder.image(frame, channels="BGR", use_column_width=True)
video_source.release()
elif upload_choice == "Upload Image":
uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
if uploaded_image:
image = Image.open(uploaded_image)
frame = np.array(image)
frame = process_frame(frame)
st.image(frame, caption="Processed Image", use_column_width=True)
elif upload_choice == "Upload to Hugging Face":
st.sidebar.info("Upload images to the 'known_faces' directory in the Hugging Face repository.")
# Configure Hugging Face Repository
REPO_NAME = "face_and_emotion_detection"
REPO_ID = "LovnishVerma/" + REPO_NAME
hf_token = os.getenv("upload") # Set your Hugging Face token as an environment variable
if not hf_token:
st.error("Hugging Face token not found. Please set it as an environment variable named 'HF_TOKEN'.")
st.stop()
# Initialize Hugging Face API
api = HfApi()
def create_hugging_face_repo():
"""Create or verify the Hugging Face repository."""
try:
api.create_repo(repo_id=REPO_ID, repo_type="dataset", token=hf_token, exist_ok=True)
st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
except Exception as e:
st.error(f"Error creating Hugging Face repository: {e}")
def upload_to_hugging_face(file):
"""Upload a file to the Hugging Face repository."""
try:
with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
temp_file.write(file.read())
temp_file_path = temp_file.name
api.upload_file(
path_or_fileobj=temp_file_path,
path_in_repo=f"known_faces/{os.path.basename(temp_file_path)}",
repo_id=REPO_ID,
token=hf_token,
)
st.success("File uploaded successfully to Hugging Face!")
except Exception as e:
st.error(f"Error uploading file to Hugging Face: {e}")
# Create the repository if it doesn't exist
create_hugging_face_repo()
# Upload image file
hf_uploaded_image = st.file_uploader("Upload Image to Hugging Face", type=["png", "jpg", "jpeg"])
if hf_uploaded_image:
upload_to_hugging_face(hf_uploaded_image)
st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise") |