Ahmadkhan12 committed
Commit 412adf0 · verified · 1 Parent(s): ca00c20

Update app.py

Files changed (1)
  1. app.py +74 -74
app.py CHANGED
@@ -1,89 +1,89 @@
  import cv2
  import numpy as np
- import os
  import onnxruntime as ort
- import streamlit as st
  from PIL import Image

- # Preprocess image to match model input requirements
- def preprocess_image(image, face_landmarks=None):
-     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)  # Convert image to grayscale
-     image_resized = cv2.resize(image, (48, 48))  # Resize image to 48x48
      image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
-     image_input = np.expand_dims(image_input, axis=0)  # Add channel dimension (for grayscale)
-     image_input = image_input.astype(np.float32) / 255.0  # Normalize the image
      return image_input

- # Check if a smile is present in the facial landmarks
- def check_for_smile(face_landmarks):
-     """Simple rule to check for a smile based on landmarks."""
-     mouth = face_landmarks['bottom_lip'] + face_landmarks['top_lip']
-     mouth_distance = np.linalg.norm(np.array(mouth[0]) - np.array(mouth[-1]))
-     if mouth_distance > 30:  # This threshold might need adjustment
-         return True
-     return False
-
- # Map the predicted emotion index to a label
- def display_emotion(emotion):
-     """Map predicted emotion index to a label."""
-     emotion_map = {
-         0: "Anger",
-         1: "Disgust",
-         2: "Fear",
-         3: "Happiness",
-         4: "Sadness",
-         5: "Surprise",
-         6: "Neutral"
-     }
-     return emotion_map.get(emotion, "Unknown")
-
- # Display emotion with smile detection
- def display_emotion_with_smile(emotion, face_landmarks=None):
-     if emotion == 6 and face_landmarks:  # 'Neutral' is typically 6 in the emotion_map
-         if check_for_smile(face_landmarks):
-             return "Happiness"  # Override neutral with happiness if a smile is detected
-     return display_emotion(emotion)  # Otherwise return the normal emotion
-
- # Predict emotion with smile detection
- def predict_emotion_with_smile(image_input, face_landmarks=None):
-     """Run inference and predict the emotion, considering smile detection."""
-     emotion = predict_emotion(image_input)  # Normal emotion prediction
-     emotion_label = display_emotion_with_smile(emotion, face_landmarks)
-     return emotion_label
-
- # Load ONNX model
- def load_model():
-     model_path = 'onnx_model.onnx'  # Make sure this is the correct path
-     if not os.path.exists(model_path):
-         raise FileNotFoundError(f"Model file {model_path} not found!")
-     emotion_model = ort.InferenceSession(model_path)
-     return emotion_model
-
- # Predict emotion using the ONNX model
- def predict_emotion(image_input):
-     emotion_model = load_model()
-     input_name = emotion_model.get_inputs()[0].name
-     output_name = emotion_model.get_outputs()[0].name
-     prediction = emotion_model.run([output_name], {input_name: image_input})
-     return np.argmax(prediction[0])
-
- # Streamlit app code
- st.title("Emotion Recognition App")
-
- # Upload an image
- uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
-
- # If an image is uploaded
  if uploaded_file is not None:
      # Open and display the uploaded image
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)

-     # Preprocess the image
-     image_input = preprocess_image(image)

-     # Predict the emotion
-     emotion_label = predict_emotion_with_smile(image_input)

-     # Display the predicted emotion
-     st.write(f"Detected Emotion: {emotion_label}")
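For context on the removed smile heuristic: check_for_smile appears to expect face_recognition-style landmark dictionaries, where 'top_lip' and 'bottom_lip' map to lists of (x, y) points, and it fires whenever the first and last mouth points are more than 30 pixels apart, so the rule depends on face scale in the image. A minimal standalone sketch of that rule; the landmark values below are synthetic and purely illustrative:

import numpy as np

def mouth_distance(face_landmarks):
    # The same measurement the removed check_for_smile used: the distance
    # between the first bottom-lip point and the last top-lip point.
    mouth = face_landmarks['bottom_lip'] + face_landmarks['top_lip']
    return np.linalg.norm(np.array(mouth[0]) - np.array(mouth[-1]))

# Synthetic landmarks: a mouth spanning roughly 41 px exceeds the fixed
# 30 px threshold, so this face would be classified as smiling.
landmarks = {'top_lip': [(100, 50), (120, 45), (140, 50)],
             'bottom_lip': [(100, 60), (120, 66), (140, 62)]}
print(mouth_distance(landmarks) > 30)  # True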
 
  import cv2
  import numpy as np
  import onnxruntime as ort
  from PIL import Image
+ import streamlit as st
+
+ # Step 1: Preprocess Image for ONNX Model (224x224 input required)
+ def preprocess_for_onnx(image):
+     """Preprocess image for the ONNX emotion model."""
+     image_resized = cv2.resize(image, (224, 224))  # Resize to match ONNX input
+     image_normalized = image_resized.astype(np.float32) / 255.0  # Normalize image
+     image_input = np.transpose(image_normalized, (2, 0, 1))  # Change dimension to (C, H, W)
+     image_input = np.expand_dims(image_input, axis=0)  # Add batch dimension
+     return image_input

+ # Step 2: Preprocess Image for AffectNet Model (48x48 grayscale input)
+ def preprocess_for_affectnet(image):
+     """Preprocess image for AffectNet (grayscale, 48x48)."""
+     image_resized = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)  # Convert to grayscale
+     image_resized = cv2.resize(image_resized, (48, 48))  # Resize to 48x48
      image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
+     image_input = np.expand_dims(image_input, axis=0)  # Add channel dimension for grayscale
+     image_input = image_input.astype(np.float32) / 255.0  # Normalize image
      return image_input

+ # Step 3: Load ONNX Model
+ def load_onnx_model(model_path):
+     """Load the ONNX emotion detection model."""
+     return ort.InferenceSession(model_path)
+
+ # Step 4: Predict Emotion with ONNX Model
+ def predict_emotion_onnx(onnx_model, image_input):
+     """Predict emotion using the ONNX model."""
+     input_name = onnx_model.get_inputs()[0].name
+     output_name = onnx_model.get_outputs()[0].name
+     prediction = onnx_model.run([output_name], {input_name: image_input})
+     return prediction
+
+ # Step 5: Predict Emotion with AffectNet (Placeholder)
+ def predict_emotion_affectnet(image_input):
+     """Predict emotion using AffectNet (placeholder function)."""
+     # Placeholder for the actual AffectNet prediction.
+     # This would involve loading a model trained on AffectNet and making predictions.
+     emotion = "happy"  # Example: this would be the predicted emotion
+     return emotion
+
+ # Step 6: Combine Predictions from Both Models
+ def combine_predictions(affectnet_emotion, onnx_emotion):
+     """Combine predictions from the AffectNet and ONNX models."""
+     if affectnet_emotion == onnx_emotion:
+         return affectnet_emotion  # If both models agree, return the same emotion
+     else:
+         return "Uncertain"  # If they disagree, return "Uncertain"
+
+ # Step 7: Main Application Logic
+ # Load the ONNX model from the application directory
+ onnx_model_path = 'onnx_model.onnx'  # Path to your ONNX model
+ onnx_model = load_onnx_model(onnx_model_path)
+
+ # Step 8: Streamlit App Logic (User Interaction)
+ st.title('Emotion Detection App')
+
+ # Upload image via Streamlit interface
+ uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
+ # If an image is uploaded, proceed with emotion prediction
  if uploaded_file is not None:
      # Open and display the uploaded image
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)

+     # Preprocess image for AffectNet (grayscale)
+     image_input_affectnet = preprocess_for_affectnet(image)
+
+     # Preprocess image for ONNX model (force RGB so RGBA or grayscale uploads
+     # do not break the (C, H, W) transpose in preprocess_for_onnx)
+     image_input_onnx = preprocess_for_onnx(np.array(image.convert("RGB")))
+
+     # Predict emotion using AffectNet model
+     affectnet_emotion = predict_emotion_affectnet(image_input_affectnet)
+
+     # Predict emotion using ONNX model
+     onnx_prediction = predict_emotion_onnx(onnx_model, image_input_onnx)
+     onnx_emotion = onnx_prediction[0][0]  # Assuming the model outputs a single emotion label (see the decoding note after this diff)

+     # Combine predictions from both models
+     final_emotion = combine_predictions(affectnet_emotion, onnx_emotion)

+     # Display the final emotion prediction
+     st.write(f"Detected Emotion: {final_emotion}")
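As committed, combine_predictions compares the AffectNet string against onnx_prediction[0][0], which for a typical classifier is the first raw score row rather than a label, so the two values will rarely match. A minimal decoding sketch, assuming the ONNX model outputs a (1, num_classes) score array; EMOTION_LABELS is a hypothetical label list, since the commit defines none and the real order depends on how the model was trained:

import numpy as np

# Hypothetical label order; verify against the ONNX model's training setup.
EMOTION_LABELS = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]

def decode_onnx_prediction(prediction):
    """Map raw ONNX output scores to a label string."""
    scores = np.asarray(prediction[0])       # First (and only) requested output
    class_index = int(np.argmax(scores[0]))  # Argmax over the single batch row
    return EMOTION_LABELS[class_index]

# Usage: decode before combining, so both models yield comparable strings.
# onnx_emotion = decode_onnx_prediction(predict_emotion_onnx(onnx_model, image_input_onnx))
# final_emotion = combine_predictions(affectnet_emotion, onnx_emotion)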