Ahmadkhan12 committed on
Commit 21ef691 · verified · 1 Parent(s): af88cea

Update app.py

Files changed (1)
  1. app.py +18 -56
app.py CHANGED
@@ -1,7 +1,7 @@
  import streamlit as st
- import cv2
  import numpy as np
  from PIL import Image
+ from transformers import pipeline

  # Set the page config
  st.set_page_config(page_title="Emotion Recognition App", layout="centered")
@@ -11,23 +11,12 @@ st.title("Emotion Recognition App")
  # Upload an image
  uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

- # Load OpenCV's face detection model
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
+ # Load the Hugging Face image-classification pipeline
+ @st.cache_resource  # Cache the model to avoid reloading it on every rerun
+ def load_model():
+     return pipeline("image-classification", model="Xenova/facial_emotions_image_detection")

- # Load ONNX emotion detection model
- emotion_model_path = "emotion_recognition.onnx"  # Replace with your model path
- emotion_net = cv2.dnn.readNetFromONNX(emotion_model_path)
-
- # Emotion labels (based on model documentation)
- emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
-
- # Resize image to reduce memory usage
- def resize_image(image, max_size=(800, 800)):
-     """
-     Resizes the image to the specified maximum size while maintaining aspect ratio.
-     """
-     image.thumbnail(max_size, Image.Resampling.LANCZOS)
-     return image
+ emotion_classifier = load_model()

  # Process the uploaded image
  if uploaded_file is not None:
@@ -35,46 +24,19 @@ if uploaded_file is not None:
      if uploaded_file.size > 10 * 1024 * 1024:  # 10 MB limit
          st.error("File too large. Please upload an image smaller than 10 MB.")
      else:
-         # Open and resize the image
-         image = Image.open(uploaded_file)
-         image = resize_image(image)
-
-         # Convert image to numpy array
-         image_np = np.array(image)
-
-         # Convert image to grayscale for face detection
-         gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
-
-         # Detect faces
-         faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-         if len(faces) > 0:
-             for (x, y, w, h) in faces:
-                 # Extract face ROI
-                 face_roi = image_np[y:y+h, x:x+w]
-                 face_blob = cv2.dnn.blobFromImage(face_roi, 1.0, (64, 64), (104, 117, 123), swapRB=True)
-
-                 # Predict emotion
-                 emotion_net.setInput(face_blob)
-                 predictions = emotion_net.forward()
-                 emotion_idx = np.argmax(predictions)
-                 emotion = emotion_labels[emotion_idx]
+         # Open and preprocess the image
+         image = Image.open(uploaded_file).convert("RGB")
+         image_resized = image.resize((224, 224))  # Resize to match model input size

-                 # Draw rectangle around the face
-                 cv2.rectangle(image_np, (x, y), (x+w, y+h), (0, 255, 0), 2)
+         # Run the emotion classification pipeline on the resized image
+         predictions = emotion_classifier(image_resized)

-                 # Display emotion
-                 cv2.putText(
-                     image_np,
-                     emotion,
-                     (x, y - 10),
-                     cv2.FONT_HERSHEY_SIMPLEX,
-                     0.9,
-                     (255, 0, 0),
-                     2,
-                 )
+         # Extract the top prediction
+         if predictions:
+             top_prediction = predictions[0]  # The pipeline returns a list of predictions, highest score first
+             emotion = top_prediction["label"]
+             confidence = top_prediction["score"]

-             # Display the processed image
-             st.image(image_np, caption="Processed Image", use_column_width=True)
+             st.image(image, caption=f"Detected Emotion: {emotion} (Confidence: {confidence:.2f})", use_column_width=True)
          else:
-             st.warning("No faces detected in the image.")
+             st.warning("Unable to determine emotion. Try another image.")
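As a sanity check for the new inference path, the sketch below exercises the same transformers pipeline outside Streamlit. It assumes the Xenova/facial_emotions_image_detection checkpoint loads through the standard Python image-classification pipeline exactly as the updated app.py calls it; "face.jpg" is a hypothetical local test image. The pipeline returns a list of {"label", "score"} dicts, highest score first.

from PIL import Image
from transformers import pipeline

# Same pipeline call as in the updated app.py
classifier = pipeline("image-classification", model="Xenova/facial_emotions_image_detection")

# "face.jpg" is a placeholder test image, preprocessed the same way app.py does
image = Image.open("face.jpg").convert("RGB").resize((224, 224))

# Each prediction is a dict with "label" and "score" keys, sorted by score
for prediction in classifier(image):
    print(f"{prediction['label']}: {prediction['score']:.2f}")

The @st.cache_resource decorator in the updated app.py keeps this relatively slow model load from repeating on every Streamlit rerun.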