Update app.py
app.py CHANGED
```diff
@@ -1,89 +1,89 @@
 import cv2
 import numpy as np
-import os
 import onnxruntime as ort
-import streamlit as st
 from PIL import Image
 
-# Preprocess
-def
-image
-image_resized = cv2.
     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
-    image_input = np.expand_dims(image_input, axis=0)  # Add channel dimension
-    image_input = image_input.astype(np.float32) / 255.0  # Normalize
     return image_input
 
-#
-def
-"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-
-
-
-#
-
-
-
-        raise FileNotFoundError(f"Model file {model_path} not found!")
-    emotion_model = ort.InferenceSession(model_path)
-    return emotion_model
-
-# Predict emotion using the ONNX model
-def predict_emotion(image_input):
-    emotion_model = load_model()
-    input_name = emotion_model.get_inputs()[0].name
-    output_name = emotion_model.get_outputs()[0].name
-    prediction = emotion_model.run([output_name], {input_name: image_input})
-    return np.argmax(prediction[0])
-
-# Streamlit app code
-st.title("Emotion Recognition App")
-
-# Upload an image
-uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
-
-# If an image is uploaded
 if uploaded_file is not None:
     # Open and display the uploaded image
     image = Image.open(uploaded_file)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    # Preprocess
-
 
-    #
-
 
-    # Display the
-    st.write(f"Detected Emotion: {
```
```diff
 import cv2
 import numpy as np
 import onnxruntime as ort
 from PIL import Image
+import streamlit as st
+
+# Step 1: Preprocess Image for ONNX Model (224x224 input required)
+def preprocess_for_onnx(image):
+    """Preprocess image for the ONNX emotion model."""
+    image_resized = cv2.resize(image, (224, 224))  # Resize to match ONNX input
+    image_normalized = image_resized.astype(np.float32) / 255.0  # Normalize image
+    image_input = np.transpose(image_normalized, (2, 0, 1))  # Change dimension to (C, H, W)
+    image_input = np.expand_dims(image_input, axis=0)  # Add batch dimension
+    return image_input
 
```
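As a quick sanity check on this path, the helper can be exercised with a synthetic array (the zeros below are just a stand-in for a decoded RGB image):

```python
import numpy as np

dummy = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for an RGB image array
x = preprocess_for_onnx(dummy)
print(x.shape, x.dtype)  # (1, 3, 224, 224) float32
```

Note that this function assumes a 3-channel array; it is fed `np.array(image)` later in the script, so an RGBA upload (e.g. a PNG with transparency) would need a conversion to RGB first.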
```diff
+# Step 2: Preprocess Image for AffectNet Model (48x48 grayscale input)
+def preprocess_for_affectnet(image):
+    """Preprocess image for AffectNet (grayscale, 48x48)."""
+    image_resized = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)  # Convert to grayscale
+    image_resized = cv2.resize(image_resized, (48, 48))  # Resize to 48x48
     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
+    image_input = np.expand_dims(image_input, axis=0)  # Add channel dimension for grayscale
+    image_input = image_input.astype(np.float32) / 255.0  # Normalize image
     return image_input
 
```
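Since this helper calls `np.array(image)` itself, it accepts the PIL image directly and yields a (1, 1, 48, 48) float32 tensor. A minimal check with a blank stand-in image:

```python
from PIL import Image

img = Image.new("RGB", (200, 200))  # blank stand-in for an uploaded photo
x = preprocess_for_affectnet(img)
print(x.shape, x.dtype)  # (1, 1, 48, 48) float32
```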
```diff
+# Step 3: Load ONNX Model
+def load_onnx_model(model_path):
+    """Load the ONNX emotion detection model."""
+    return ort.InferenceSession(model_path)
+
```
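Recent onnxruntime releases prefer (and GPU builds may require) an explicit execution-provider list when creating a session. A minimal sketch, assuming a CPU-only deployment:

```python
def load_onnx_model(model_path):
    """Load the ONNX emotion detection model on CPU."""
    return ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
```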
```diff
+# Step 4: Predict Emotion with ONNX Model
+def predict_emotion_onnx(onnx_model, image_input):
+    """Predict emotion using the ONNX model."""
+    input_name = onnx_model.get_inputs()[0].name
+    output_name = onnx_model.get_outputs()[0].name
+    prediction = onnx_model.run([output_name], {input_name: image_input})
+    return prediction
+
```
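`InferenceSession.run` returns a list with one array per requested output, so the `prediction` returned here is a one-element list whose first item is the output tensor. The session also exposes its expected input metadata, which is handy for confirming that the preprocessing above matches the model:

```python
inp = onnx_model.get_inputs()[0]
print(inp.name, inp.shape, inp.type)  # e.g. ('input', [1, 3, 224, 224], 'tensor(float)')
```

The printed name and shape here are examples, not guarantees; they depend on how the model was exported.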
```diff
+# Step 5: Predict Emotion with AffectNet (Placeholder)
+def predict_emotion_affectnet(image_input):
+    """Predict emotion using AffectNet (Placeholder function)."""
+    # Placeholder for the actual AffectNet prediction
+    # This would involve loading a model trained on AffectNet and making predictions.
+    emotion = "happy"  # Example: This would be the predicted emotion
+    return emotion
+
```
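If an AffectNet-trained network were also exported to ONNX, the placeholder could be filled in along these lines. Everything here is an assumption for illustration: the "affectnet_model.onnx" file name is hypothetical, and the eight-class label order must match the actual training configuration:

```python
AFFECTNET_LABELS = ["neutral", "happy", "sad", "surprise",
                    "fear", "disgust", "anger", "contempt"]  # assumed label order

def predict_emotion_affectnet(image_input):
    """Sketch: predict emotion with a hypothetical AffectNet-trained ONNX model."""
    session = ort.InferenceSession("affectnet_model.onnx")  # hypothetical model file
    input_name = session.get_inputs()[0].name
    scores = session.run(None, {input_name: image_input})[0]  # None = return all outputs
    return AFFECTNET_LABELS[int(np.argmax(scores))]
```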
```diff
+# Step 6: Combine Predictions from Both Models
+def combine_predictions(affectnet_emotion, onnx_emotion):
+    """Combine predictions from AffectNet and ONNX models."""
+    if affectnet_emotion == onnx_emotion:
+        return affectnet_emotion  # If both models agree, return the same emotion
+    else:
+        return "Uncertain"  # If they disagree, return "Uncertain"
+
```
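This is strict-agreement voting: the two labels must be identical strings, otherwise the app reports "Uncertain":

```python
print(combine_predictions("happy", "happy"))  # happy
print(combine_predictions("happy", "sad"))    # Uncertain
```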
```diff
+# Step 7: Main Application Logic
+# Load the ONNX model from the application directory
+onnx_model_path = 'onnx_model.onnx'  # Path to your ONNX model
+onnx_model = load_onnx_model(onnx_model_path)
+
+# Step 8: Streamlit App Logic (User Interaction)
+st.title('Emotion Detection App')
+
+# Upload image via Streamlit interface
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
```
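One Streamlit-specific caveat: the script is rerun from the top on every interaction, so the session above is rebuilt each time. A sketch of the usual fix, assuming a Streamlit version that provides st.cache_resource (1.18 or later):

```python
@st.cache_resource
def get_model():
    # Parse the ONNX file once per process; reuse the session across reruns
    return load_onnx_model('onnx_model.onnx')

onnx_model = get_model()
```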
```diff
+# If an image is uploaded, proceed with emotion prediction
 if uploaded_file is not None:
     # Open and display the uploaded image
     image = Image.open(uploaded_file)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
+    # Preprocess image for AffectNet (grayscale)
+    image_input_affectnet = preprocess_for_affectnet(image)
+
+    # Preprocess image for ONNX model (RGB)
+    image_input_onnx = preprocess_for_onnx(np.array(image))
+
+    # Predict emotion using AffectNet model
+    affectnet_emotion = predict_emotion_affectnet(image_input_affectnet)
+
+    # Predict emotion using ONNX model
+    onnx_prediction = predict_emotion_onnx(onnx_model, image_input_onnx)
+    onnx_emotion = onnx_prediction[0][0]  # Assuming the model outputs a single emotion label
 
+    # Combine predictions from both models
+    final_emotion = combine_predictions(affectnet_emotion, onnx_emotion)
 
+    # Display the final emotion prediction
+    st.write(f"Detected Emotion: {final_emotion}")
```
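A caveat on the final comparison: `onnx_prediction[0][0]` is the first row of the first output tensor, which for a typical classifier is a vector of class scores rather than a label string, so combine_predictions would almost always fall through to "Uncertain". The usual post-processing maps the scores to a label first; a sketch, where the label list is an assumption that must match the model's training:

```python
EMOTION_LABELS = ["angry", "disgust", "fear", "happy",
                  "sad", "surprise", "neutral"]  # assumed class order

scores = onnx_prediction[0]  # first output tensor, shape (1, num_classes)
onnx_emotion = EMOTION_LABELS[int(np.argmax(scores))]
final_emotion = combine_predictions(affectnet_emotion, onnx_emotion)
```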