Ahmadkhan12 committed · verified
Commit 4bec973 · 1 Parent(s): 46e2aed

Update app.py

Files changed (1)
  1. app.py +14 -32
app.py CHANGED
@@ -13,9 +13,19 @@ def load_model(model_path='onnx_model.onnx'):
 # Preprocess the image
 def preprocess_image(image):
     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)  # Convert to BGR (OpenCV format)
-    image_resized = cv2.resize(image, (224, 224))  # Resize to 224x224
-    image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
-    image_input = image_input.transpose(0, 3, 1, 2)  # Change dimensions to (1, 3, 224, 224)
+
+    # Resize the image to 48x48 as per the error message (model's expected input size)
+    image_resized = cv2.resize(image, (48, 48))  # Resize to 48x48
+
+    # Convert to grayscale if the model expects a single channel
+    image_gray = cv2.cvtColor(image_resized, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
+
+    # If the model expects 3 channels, keep the image in RGB (3 channels)
+    # image_resized = cv2.cvtColor(image_resized, cv2.COLOR_BGR2RGB)  # For RGB input
+
+    # Add batch dimension
+    image_input = np.expand_dims(image_gray, axis=0)   # Add batch dimension
+    image_input = np.expand_dims(image_input, axis=0)  # Add channel dimension (for grayscale)
     image_input = image_input.astype(np.float32) / 255.0  # Normalize the image
     return image_input
 
@@ -34,32 +44,4 @@ def predict_emotion_onnx(model, image_input):
     input_name = model.get_inputs()[0].name
     output_name = model.get_outputs()[0].name
     # Run the model
-    prediction = model.run([output_name], {input_name: image_input})
-    return prediction[0]
-
-# Streamlit UI
-st.title("Emotion Detection")
-
-# Upload an image
-uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
-
-if uploaded_file is not None:
-    # Open and display the uploaded image
-    image = Image.open(uploaded_file)
-    st.image(image, caption="Uploaded Image", use_column_width=True)
-
-    # Load model
-    onnx_model = load_model()
-
-    # Preprocess the image
-    image_input = preprocess_image(image)
-
-    # Get emotion prediction
-    emotion_prediction = predict_emotion_onnx(onnx_model, image_input)
-
-    # Get the emotion label and confidence
-    emotion_label, confidence = get_emotion_from_output(emotion_prediction)
-
-    # Display the predicted emotion and confidence
-    st.write(f"Predicted Emotion: {emotion_label}")
-    st.write(f"Confidence: {confidence:.2f}")
+    prediction = model.run([output_name], {input_name: image_input})
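
As a sanity check for the new 48x48 grayscale preprocessing, the model's declared input shape can be read directly from the ONNX session instead of being inferred from a runtime error. A minimal sketch, assuming onnxruntime is installed and the model file is the repo's onnx_model.onnx (the exact shape values printed depend on how the model was exported):

import onnxruntime as ort

# Load the session and inspect the first input's name, shape, and element type
session = ort.InferenceSession("onnx_model.onnx")
inp = session.get_inputs()[0]
print(inp.name, inp.shape, inp.type)  # e.g. [1, 1, 48, 48] and tensor(float)

# The array returned by preprocess_image() should match this, i.e. a
# float32 tensor of shape (1, 1, 48, 48) for one grayscale 48x48 frame.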