Ahmadkhan12 committed
Commit abf5487 · verified · 1 Parent(s): f56a327

Update app.py

Files changed (1): app.py (+18 -20)
app.py CHANGED
@@ -1,6 +1,5 @@
 import streamlit as st
-from fer import FER
-import cv2
+from deepface import DeepFace
 from PIL import Image
 import numpy as np
 
@@ -11,30 +10,29 @@ st.title("Emotion Recognition for Autism Support")
 uploaded_image = st.file_uploader("Upload an Image with a Face", type=["jpg", "jpeg", "png"])
 
 if uploaded_image:
-    # Load the image
+    # Display the uploaded image
     image = Image.open(uploaded_image)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    # Convert PIL Image to NumPy Array
+    # Convert PIL Image to NumPy array
     img_np = np.array(image)
 
-    # Convert RGB to BGR (required by OpenCV)
-    img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
-
-    # Initialize FER detector
-    detector = FER(mtcnn=False)
-
-    # Detect emotions
-    st.write("Analyzing emotions...")
-    result = detector.detect_emotions(img_bgr)
-
-    if result:
-        for face in result:
-            emotions = face["emotions"]
+    try:
+        # Perform Emotion Analysis
+        st.write("Analyzing emotions...")
+        result = DeepFace.analyze(img_path=img_np, actions=["emotion"], enforce_detection=True)
+
+        # Extract and Display Emotions
+        if result and "emotion" in result:
+            emotions = result["emotion"]
             st.write("Detected Emotions:")
             st.json(emotions)
-    else:
-        st.warning("No faces detected in the image. Please try another image.")
 
-    # Provide Friendly Feedback
-    st.write("Emotion recognition will be refined in future updates!")
+            # Provide Friendly Feedback
+            dominant_emotion = max(emotions, key=emotions.get)
+            st.success(f"The dominant emotion is: {dominant_emotion}")
+        else:
+            st.warning("No emotions detected. Please try with another image.")
+
+    except Exception as e:
+        st.error(f"An error occurred: {e}")
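
Note on the new DeepFace call (not part of the commit): recent deepface releases return a list with one dictionary per detected face from DeepFace.analyze, while older releases returned a single dictionary, so the result["emotion"] lookup above may fail depending on the installed version. DeepFace also treats a NumPy array passed as img_path as an OpenCV-style BGR image, whereas PIL produces RGB. A minimal, version-tolerant sketch, assuming deepface and Pillow are installed and using a hypothetical face.jpg test image:

import numpy as np
from PIL import Image
from deepface import DeepFace

# Load with PIL (RGB) and flip channels to BGR, which DeepFace's
# OpenCV-based pipeline assumes for ndarray input.
img_np = np.array(Image.open("face.jpg").convert("RGB"))
img_bgr = img_np[:, :, ::-1].copy()

result = DeepFace.analyze(img_path=img_bgr, actions=["emotion"], enforce_detection=True)

# Normalize the return shape: newer versions give a list of per-face dicts,
# older versions a single dict.
faces = result if isinstance(result, list) else [result]
for face in faces:
    emotions = face["emotion"]
    dominant = max(emotions, key=emotions.get)
    print(dominant, emotions[dominant])

Each per-face dictionary also carries a dominant_emotion key, which could replace the manual max() in the committed code.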