Ahmadkhan12 committed
Commit 46e2aed · verified · 1 Parent(s): 9a5dd5f

Update app.py

Files changed (1): app.py (+44 -53)
app.py CHANGED
@@ -1,74 +1,65 @@
- import onnxruntime as ort
- import numpy as np
  import cv2
- from PIL import Image
  import streamlit as st
- import kagglehub  # Import kagglehub to load the AffectNet dataset
-
- # Download the AffectNet dataset
- path = kagglehub.dataset_download("fatihkgg/affectnet-yolo-format")
- print("Path to AffectNet dataset:", path)
-
- # Emotion labels for AffectNet
- emotion_labels = ["Anger", "Disgust", "Fear", "Happy", "Sadness", "Surprise", "Neutral"]
-
- # Load ONNX model
- onnx_model = ort.InferenceSession("onnx_model.onnx")

- # Softmax function to convert logits to probabilities
- def softmax(logits):
-     exp_logits = np.exp(logits - np.max(logits))  # Stability trick
-     return exp_logits / np.sum(exp_logits)

- # Preprocess image function for ONNX model
  def preprocess_image(image):
-     """Preprocess image to match model input requirements"""
-     # Convert the image to grayscale
-     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
-
-     # Resize image to 48x48 (model's expected input size)
-     image_resized = cv2.resize(image, (48, 48))
-
-     # Add batch dimension and channels (for grayscale: 1 channel)
-     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension (1, 48, 48)
-     image_input = np.expand_dims(image_input, axis=1)  # Add channel dimension (1, 1, 48, 48)
-
-     # Normalize the image
-     image_input = image_input.astype(np.float32) / 255.0
-
      return image_input

  # Predict emotion using the ONNX model
- def predict_emotion_onnx(onnx_model, image_input):
-     input_name = onnx_model.get_inputs()[0].name
-     output_name = onnx_model.get_outputs()[0].name
-     prediction = onnx_model.run([output_name], {input_name: image_input})
-
-     # Apply softmax to the output logits
-     probabilities = softmax(prediction[0][0])  # We assume batch size of 1
-
-     # Get the predicted emotion label (index of the highest probability)
-     predicted_class = np.argmax(probabilities)
-
-     return emotion_labels[predicted_class], probabilities[predicted_class]
-
- # Streamlit interface
- st.title("Emotion Recognition with ONNX and AffectNet")
-
- # File uploader to upload images
  uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

  if uploaded_file is not None:
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)

      # Preprocess the image
      image_input = preprocess_image(image)

-     # Predict the emotion
-     emotion_label, probability = predict_emotion_onnx(onnx_model, image_input)
-
-     # Display the predicted emotion and probability
-     st.write(f"Predicted Emotion: {emotion_label}")
-     st.write(f"Confidence: {probability:.2f}")
74
 
 
 
 
 
 
 
  import cv2
+ import numpy as np
+ import onnxruntime as ort
  import streamlit as st
+ from PIL import Image
+
+ # Load the ONNX model
+ def load_model(model_path='onnx_model.onnx'):
+     model = ort.InferenceSession(model_path)
+     return model
+
+ # Preprocess the image
  def preprocess_image(image):
+     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)  # Convert to BGR (OpenCV format)
+     image_resized = cv2.resize(image, (224, 224))  # Resize to 224x224
+     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
+     image_input = image_input.transpose(0, 3, 1, 2)  # Change dimensions to (1, 3, 224, 224)
+     image_input = image_input.astype(np.float32) / 255.0  # Normalize the image
      return image_input

+ # Map the raw output to emotions
+ def get_emotion_from_output(output):
+     emotion_labels = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
+     # Get the index of the highest value in the output (i.e., the predicted emotion)
+     emotion_index = np.argmax(output)
+     confidence = output[0][emotion_index]  # Confidence of the prediction
+     emotion = emotion_labels[emotion_index]  # Corresponding emotion label
+     return emotion, confidence
+
  # Predict emotion using the ONNX model
+ def predict_emotion_onnx(model, image_input):
+     # Get the input and output names for the ONNX model
+     input_name = model.get_inputs()[0].name
+     output_name = model.get_outputs()[0].name
+     # Run the model
+     prediction = model.run([output_name], {input_name: image_input})
+     return prediction[0]

+ # Streamlit UI
+ st.title("Emotion Detection")

+ # Upload an image
  uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

  if uploaded_file is not None:
+     # Open and display the uploaded image
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)

+     # Load model
+     onnx_model = load_model()
+
      # Preprocess the image
      image_input = preprocess_image(image)

+     # Get emotion prediction
+     emotion_prediction = predict_emotion_onnx(onnx_model, image_input)
+
+     # Get the emotion label and confidence
+     emotion_label, confidence = get_emotion_from_output(emotion_prediction)
+
+     # Display the predicted emotion and confidence
+     st.write(f"Predicted Emotion: {emotion_label}")
+     st.write(f"Confidence: {confidence:.2f}")