import onnxruntime as ort
import numpy as np
import cv2
from PIL import Image
import streamlit as st

# Load the ONNX model once and cache it across Streamlit reruns
@st.cache_resource
def load_model():
    return ort.InferenceSession("onnx_model.onnx")

onnx_model = load_model()

# Emotion labels (same as the model's output classes)
emotion_labels = ["Anger", "Disgust", "Fear", "Happy", "Sadness", "Surprise", "Neutral"]
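# NOTE: this label order is an assumption based on a common 7-class FER setup;
# it must match the class order the model was trained and exported with.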

# Softmax function to convert logits to probabilities
def softmax(logits):
    exp_logits = np.exp(logits - np.max(logits))  # Stability trick
    return exp_logits / np.sum(exp_logits)

# Preprocess image function
def preprocess_image(image):
    """Preprocess a PIL image to match the model's input requirements"""
    # Ensure a 3-channel RGB array first (uploaded PNGs may be RGBA or grayscale)
    image = np.array(image.convert("RGB"))

    # Convert the image to grayscale
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    
    # Resize image to 48x48 (model's expected input size)
    image_resized = cv2.resize(image, (48, 48))
    
    # Add batch dimension and channels (for grayscale: 1 channel)
    image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension (1, 48, 48)
    image_input = np.expand_dims(image_input, axis=1)  # Add channel dimension (1, 1, 48, 48)
    
    # Normalize the image
    image_input = image_input.astype(np.float32) / 255.0
    
    return image_input

# Predict emotion using the ONNX model
def predict_emotion_onnx(onnx_model, image_input):
    input_name = onnx_model.get_inputs()[0].name
    output_name = onnx_model.get_outputs()[0].name
    prediction = onnx_model.run([output_name], {input_name: image_input})
    
    # Apply softmax to the output logits
    probabilities = softmax(prediction[0][0])  # We assume batch size of 1
    
    # Get the predicted emotion label (index of the highest probability)
    predicted_class = np.argmax(probabilities)
    
    return emotion_labels[predicted_class], probabilities[predicted_class]

# Streamlit interface
st.title("Emotion Recognition with ONNX")

uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Preprocess the image
    image_input = preprocess_image(image)

    # Predict the emotion
    emotion_label, probability = predict_emotion_onnx(onnx_model, image_input)

    # Display the predicted emotion and probability
    st.write(f"Predicted Emotion: {emotion_label}")
    st.write(f"Confidence: {probability:.2f}")