Update app.py

app.py CHANGED
@@ -1,27 +1,11 @@
-import streamlit as st
-import onnxruntime as ort
 import cv2
 import numpy as np
+import onnxruntime as ort  # Import onnxruntime for model inference
+import streamlit as st
 from PIL import Image
 
-#
-
-st.title("Emotion Recognition App")
-
-# Load the ONNX model using onnxruntime
-@st.cache_resource
-def load_model():
-    model_path = "onnx_model.onnx"  # Ensure this is the correct path to your uploaded ONNX model
-    return ort.InferenceSession(model_path)
-
-# Load the emotion detection model
-emotion_model = load_model()
-
-# Process the uploaded image
-uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
-
-def preprocess_image(image):
-    """Preprocess image to match model input requirements"""
+# Preprocess image to match model input requirements
+def preprocess_image(image, face_landmarks=None):
     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)  # Convert image to grayscale
     image_resized = cv2.resize(image, (48, 48))  # Resize image to 48x48
     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
@@ -29,27 +13,48 @@ def preprocess_image(image):
     image_input = image_input.astype(np.float32) / 255.0  # Normalize the image
     return image_input
 
+# Check if smile is present in the facial landmarks
+def check_for_smile(face_landmarks):
+    """Simple rule to check for smile based on landmarks"""
+    mouth = face_landmarks['bottom_lip'] + face_landmarks['top_lip']
+    mouth_distance = np.linalg.norm(np.array(mouth[0]) - np.array(mouth[-1]))
+    if mouth_distance > 30:  # This threshold might need adjustment
+        return True
+    return False
+
+# Display emotion with post-processing to check for smiles
+def display_emotion_with_smile(emotion, face_landmarks=None):
+    if emotion == 6 and face_landmarks:  # 'Neutral' is typically 6 in the emotion_map
+        if check_for_smile(face_landmarks):
+            return "Happiness"  # Override neutral with happiness if a smile is detected
+    return display_emotion(emotion)  # Otherwise return the normal emotion
+
+# Predict emotion with smile detection
+def predict_emotion_with_smile(image_input, face_landmarks=None):
+    """Run inference and predict the emotion, considering smile detection"""
+    emotion = predict_emotion(image_input)  # Normal emotion prediction
+    emotion_label = display_emotion_with_smile(emotion, face_landmarks)
+    return emotion_label
+
+# Load ONNX model
+def load_model():
+    model_path = "onnx_model.onnx"  # Make sure to set the correct path
+    emotion_model = ort.InferenceSession(model_path)
+    return emotion_model
+
+# Predict emotion using the ONNX model
 def predict_emotion(image_input):
-
+    emotion_model = load_model()
     input_name = emotion_model.get_inputs()[0].name
     output_name = emotion_model.get_outputs()[0].name
     prediction = emotion_model.run([output_name], {input_name: image_input})
-    emotion = np.argmax(prediction[0])
-    return emotion
+    return np.argmax(prediction[0])
+
+# Streamlit app code
+st.title("Emotion Recognition App")
 
-#
-def display_emotion(emotion):
-    """Map emotion index to a human-readable emotion"""
-    emotion_map = {
-        0: "Anger",
-        1: "Disgust",
-        2: "Fear",
-        3: "Happiness",
-        4: "Sadness",
-        5: "Surprise",
-        6: "Neutral"
-    }
-    return emotion_map.get(emotion, "Unknown")
+# Upload an image
+uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
 
 # If an image is uploaded
 if uploaded_file is not None:
@@ -61,8 +66,7 @@ if uploaded_file is not None:
     image_input = preprocess_image(image)
 
     # Predict the emotion
-    emotion = predict_emotion(image_input)
-    emotion_label = display_emotion(emotion)
+    emotion_label = predict_emotion_with_smile(image_input)
 
     # Display the predicted emotion
     st.write(f"Detected Emotion: {emotion_label}")
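Note: display_emotion_with_smile in the updated file still falls back to display_emotion, but this commit deletes that helper, so any prediction not overridden by the smile check would raise a NameError. A minimal fix is to keep the helper exactly as it existed before the change:

# Map emotion index to a human-readable emotion (kept from the previous version)
def display_emotion(emotion):
    emotion_map = {
        0: "Anger",
        1: "Disgust",
        2: "Fear",
        3: "Happiness",
        4: "Sadness",
        5: "Surprise",
        6: "Neutral"
    }
    return emotion_map.get(emotion, "Unknown")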
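Note also that nothing in the updated file ever supplies the face_landmarks argument, so predict_emotion_with_smile always receives the default None and the smile check is dead code. The landmark dict format check_for_smile expects ('top_lip' and 'bottom_lip' keys holding lists of points) matches what the face_recognition library returns; assuming that is the intended source (it is not shown in this commit), a minimal sketch:

import numpy as np
import face_recognition  # assumed dependency, not shown in this commit

def get_face_landmarks(image):
    # face_recognition.face_landmarks returns one dict per detected face,
    # with keys such as 'top_lip' and 'bottom_lip'
    all_landmarks = face_recognition.face_landmarks(np.array(image))
    return all_landmarks[0] if all_landmarks else None

The call site would then become emotion_label = predict_emotion_with_smile(image_input, get_face_landmarks(image)).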
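Finally, the previous version cached the ONNX session with @st.cache_resource, while the updated predict_emotion calls load_model() on every prediction and therefore reloads the model file for each uploaded image. Restoring the decorator from the old version avoids that:

import onnxruntime as ort
import streamlit as st

@st.cache_resource  # create the ONNX session once per process, as in the previous version
def load_model():
    model_path = "onnx_model.onnx"  # Make sure to set the correct path
    return ort.InferenceSession(model_path)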