Spaces:
Sleeping
Sleeping
File size: 2,052 Bytes
46274ff bb9fde7 c096457 36dac79 46274ff c096457 36dac79 21ef691 36dac79 1effd41 c096457 36dac79 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
import streamlit as st
import numpy as np
from PIL import Image
import onnxruntime as ort
import cv2
# Set the page config — must be the first Streamlit call in the script.
st.set_page_config(page_title="Emotion Recognition App", layout="centered")
st.title("Emotion Recognition App")
# Upload an image; uploader returns None until the user supplies a file.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
# Build the ONNX Runtime session; cached so it is created once per
# server process and reused across Streamlit reruns.
@st.cache_resource
def load_model():
    """Return a cached onnxruntime.InferenceSession for the emotion model."""
    return ort.InferenceSession("emotion_model.onnx")
# Load the (cached) ONNX inference session.
emotion_model = load_model()
# Class labels for facial emotions; index order must match the model's
# output logits (based on the training dataset).
emotion_labels = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
# Preprocess image to match model input requirements
def preprocess_image(image, target_size=(64, 64)):
    """Convert a PIL RGB image to the float32 tensor the ONNX model expects.

    Args:
        image: PIL.Image in RGB mode (e.g. from Image.open(...).convert("RGB")).
        target_size: (width, height) expected by the model; defaults to 64x64,
            matching the original hard-coded size.

    Returns:
        np.ndarray of shape (1, 1, target_size[1], target_size[0]),
        dtype float32, pixel values scaled to [0, 1].
    """
    # Model was trained on single-channel images, so drop color first.
    image_gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    image_resized = cv2.resize(image_gray, target_size)
    # Normalize to [0, 1] range.
    image_normalized = image_resized / 255.0
    # Add batch and channel dimensions in one step -> (1, 1, H, W).
    return image_normalized[np.newaxis, np.newaxis, :, :].astype(np.float32)
# Run inference once the user has supplied an image.
if uploaded_file is not None:
    # Decode the upload into an RGB PIL image, then into a model tensor.
    pil_image = Image.open(uploaded_file).convert("RGB")
    model_input = preprocess_image(pil_image)

    # Feed the tensor to the ONNX session under its declared input name.
    input_name = emotion_model.get_inputs()[0].name
    logits = emotion_model.run(None, {input_name: model_input})[0]

    # Pick the top-scoring class and map it to its human-readable label.
    top_idx = int(np.argmax(logits, axis=1)[0])
    predicted_emotion = emotion_labels[top_idx]

    # Show the original image with the prediction as its caption.
    st.image(
        pil_image,
        caption=f"Detected Emotion: {predicted_emotion}",
        use_column_width=True,
    )
|