# NOTE: the lines "Spaces:" / "Sleeping" / "Sleeping" were Hugging Face Spaces
# UI status text captured by the page scrape, not part of the program.
# Third-party dependencies (all required at startup)
import cv2
import numpy as np
import onnxruntime as ort
import streamlit as st
from PIL import Image
# Set the page config (must run before any other st.* call)
st.set_page_config(page_title="Emotion Recognition App", layout="centered")
st.title("Emotion Recognition App")

# Upload an image; only common raster formats the pipeline can decode
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
# Load the ONNX model using onnxruntime
@st.cache_resource  # cache the session so it is built once, not on every Streamlit rerun
def load_model():
    """Create an ONNX Runtime inference session for the emotion model.

    Returns:
        ort.InferenceSession: session bound to ``emotion_model.onnx``
        (the model file must sit next to this script).
    """
    model_path = "emotion_model.onnx"
    return ort.InferenceSession(model_path)
# Load the model once at startup
emotion_model = load_model()

# Class labels for facial emotions — order must match the model's output
# indices (based on the training dataset)
emotion_labels = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
# Preprocess image to match model input requirements
def preprocess_image(image):
    """Convert a PIL RGB image into the tensor layout the model expects.

    Args:
        image: PIL.Image in RGB mode.

    Returns:
        np.ndarray of shape (1, 1, 64, 64), dtype float32, values in [0, 1].
    """
    # Grayscale + resize to the 64x64 input the model was trained on
    image_gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    image_resized = cv2.resize(image_gray, (64, 64))
    # Normalize pixel values to the [0, 1] range
    image_normalized = image_resized / 255.0
    # Add batch and channel dimensions -> (1, 1, 64, 64)
    image_reshaped = np.expand_dims(image_normalized, axis=0)
    image_reshaped = np.expand_dims(image_reshaped, axis=0)
    return image_reshaped.astype(np.float32)
# Process the uploaded image
if uploaded_file is not None:
    # Force RGB so preprocessing always sees 3 channels (handles PNG alpha, grayscale)
    image = Image.open(uploaded_file).convert("RGB")
    processed_image = preprocess_image(image)

    # Perform inference: feed the tensor under the model's declared input name
    input_name = emotion_model.get_inputs()[0].name
    outputs = emotion_model.run(None, {input_name: processed_image})

    # Index of the highest probability -> human-readable label
    predicted_class = int(np.argmax(outputs[0], axis=1)[0])
    predicted_emotion = emotion_labels[predicted_class]

    # Display the results (use_container_width replaces the deprecated use_column_width)
    st.image(image, caption=f"Detected Emotion: {predicted_emotion}", use_container_width=True)