import cv2
import numpy as np
import onnxruntime as ort
import streamlit as st
from PIL import Image


# Load the ONNX model
def load_model(model_path='onnx_model.onnx'):
    model = ort.InferenceSession(model_path)
    return model


# Preprocess the image
def preprocess_image(image):
    # Ensure 3-channel RGB input (PNG uploads may carry an alpha channel),
    # then convert to BGR (OpenCV format)
    image = cv2.cvtColor(np.array(image.convert('RGB')), cv2.COLOR_RGB2BGR)

    # Resize to the model's expected 48x48 input size
    image_resized = cv2.resize(image, (48, 48))

    # Convert to grayscale since the model expects a single channel
    image_gray = cv2.cvtColor(image_resized, cv2.COLOR_BGR2GRAY)
    # If the model expects 3 channels instead, keep the image in RGB:
    # image_resized = cv2.cvtColor(image_resized, cv2.COLOR_BGR2RGB)

    # Add batch and channel dimensions -> shape (1, 1, 48, 48)
    image_input = np.expand_dims(image_gray, axis=0)   # Batch dimension
    image_input = np.expand_dims(image_input, axis=0)  # Channel dimension (grayscale)
    image_input = image_input.astype(np.float32) / 255.0  # Normalize to [0, 1]
    return image_input


# Map the raw output to emotions
def get_emotion_from_output(output):
    emotion_labels = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
    # Index of the highest value in the output is the predicted emotion
    emotion_index = np.argmax(output)
    confidence = output[0][emotion_index]    # Confidence of the prediction
    emotion = emotion_labels[emotion_index]  # Corresponding emotion label
    return emotion, confidence


# Predict emotion using the ONNX model
def predict_emotion_onnx(model, image_input):
    # Get the input and output names for the ONNX model
    input_name = model.get_inputs()[0].name
    output_name = model.get_outputs()[0].name
    # Run the model
    prediction = model.run([output_name], {input_name: image_input})
    return prediction[0]


# Streamlit UI
st.title("Emotion Detection")

# Upload an image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Open and display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Load the model
    onnx_model = load_model()

    # Preprocess the image
    image_input = preprocess_image(image)

    # Get the emotion prediction
    emotion_prediction = predict_emotion_onnx(onnx_model, image_input)

    # Get the emotion label and confidence
    emotion_label, confidence = get_emotion_from_output(emotion_prediction)

    # Display the predicted emotion and confidence
    st.write(f"Predicted Emotion: {emotion_label}")
    st.write(f"Confidence: {confidence:.2f}")
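
# Usage note (an assumption, not stated in the original script): launch the app
# with `streamlit run app.py`, where app.py is whatever filename this script is
# saved under, and place onnx_model.onnx in the same directory.
#
# Caveat: if the exported model returns raw logits rather than softmax
# probabilities, the "confidence" printed above is unnormalized. A minimal
# sketch of normalizing it before display, assuming a (1, 7) output array:
#
#   probs = np.exp(emotion_prediction - np.max(emotion_prediction))
#   probs = probs / np.sum(probs)
#   emotion_label, confidence = get_emotion_from_output(probs)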