LovnishVerma committed
Commit 9cd5b33 · verified · 1 Parent(s): c7b74ee

Update app.py

Files changed (1): app.py +92 -51
app.py CHANGED
@@ -1,97 +1,138 @@
- import streamlit as st
  import cv2
  import numpy as np
  import time
- # Larger title
- st.markdown("<h1 style='text-align: center;'>Emotion Detection</h1>", unsafe_allow_html=True)
-
- # Smaller subtitle
- st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
- start = time.time()
  from keras.models import load_model
  import tempfile
  from PIL import Image

  @st.cache_resource
  def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')
-     return model

- # Load the model
- model = load_emotion_model()
- print("time taken to load model : ", time.time() - start)
- img_shape = 48
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

- def process_frame(frame):
      gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
      faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

      for (x, y, w, h) in faces:
-         roi_gray = gray_frame[y:y+h, x:x+w]
-         roi_color = frame[y:y+h, x:x+w]

-         face_roi = cv2.resize(roi_color, (img_shape, img_shape))
-         face_roi = np.expand_dims(face_roi, axis=0)
-         face_roi = face_roi / float(img_shape)
-         predictions = model.predict(face_roi)
-         emotion = emotion_labels[np.argmax(predictions[0])]

          cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-         cv2.putText(frame, emotion, (x, y+h), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

      return frame

- # def video_feed(video_source):
- #     # Read and process video frames
- #     while True:
- #         ret, frame = video_source.read()
- #         if not ret:
- #             break
- #         frame = process_frame(frame)
- #         st.image(frame, channels="BGR")
-
  def video_feed(video_source):
-     # Create a placeholder to display the frames
-     frame_placeholder = st.empty()  # This placeholder will be used to replace frames in-place

      while True:
          ret, frame = video_source.read()
          if not ret:
              break

-         frame = process_frame(frame)
-
-         # Display the frame in the placeholder
          frame_placeholder.image(frame, channels="BGR", use_column_width=True)

-
-
- # Sidebar for video or image upload
- upload_choice = st.sidebar.radio("Choose input source", ["Upload Video", "Upload Image", "Camera"])

  if upload_choice == "Camera":
-     # Access camera
-     video_source = cv2.VideoCapture(0)
      video_feed(video_source)

  elif upload_choice == "Upload Video":
      uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
      if uploaded_video:
-         # Temporarily save the video to disk
          with tempfile.NamedTemporaryFile(delete=False) as tfile:
              tfile.write(uploaded_video.read())
          video_source = cv2.VideoCapture(tfile.name)
          video_feed(video_source)

  elif upload_choice == "Upload Image":
-     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
      if uploaded_image:
          image = Image.open(uploaded_image)
          frame = np.array(image)
-         frame = process_frame(frame)
-         st.image(frame, caption='Processed Image', use_column_width=True)
-
- st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
 
1
+ import face_recognition
+ import os
  import cv2
  import numpy as np
  import time
  from keras.models import load_model
  import tempfile
  from PIL import Image
+ import streamlit as st
 
+ # Streamlit UI Setup
+ st.markdown("<h1 style='text-align: center;'>Emotion & Face Recognition</h1>", unsafe_allow_html=True)
+ st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
+
+ # Known faces folder path
+ KNOWN_FACES_DIR = "known_faces"
+
+ # Load emotion detection model
  @st.cache_resource
  def load_emotion_model():
+     return load_model("CNN_Model_acc_75.h5")

+ emotion_model = load_emotion_model()

+ # Face detection model
+ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
+ emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+ img_shape = 48

+ # Known faces dictionary
+ known_faces = {"names": [], "encodings": []}
+
+ def load_faces_from_folder(folder_path):
+     """
+     Load known faces from a folder, using filenames as names.
+     """
+     for filename in os.listdir(folder_path):
+         if filename.endswith(('.jpg', '.jpeg', '.png')):
+             name = os.path.splitext(filename)[0]
+             image_path = os.path.join(folder_path, filename)
+
+             # Load and encode the image
+             image = face_recognition.load_image_file(image_path)
+             face_encodings = face_recognition.face_encodings(image)
+
+             if face_encodings:  # Ensure a face is found
+                 known_faces["names"].append(name)
+                 known_faces["encodings"].append(face_encodings[0])
+                 print(f"Loaded face for {name}")
+             else:
+                 print(f"No face detected in {filename}")
+
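+ # Note: each image's filename doubles as the person's label, e.g. a hypothetical
+ # known_faces/alice.jpg would be recognized as "alice". One clearly visible face
+ # per image works best, since only the first encoding is kept.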
+ # Load known faces
+ load_faces_from_folder(KNOWN_FACES_DIR)
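+ # Note: Streamlit re-runs the whole script on every interaction, so these
+ # encodings are rebuilt each time; caching them (e.g. via @st.cache_resource,
+ # as done for the model above) would avoid repeated disk reads.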
+
+ def recognize_face(unknown_face_encoding):
+     """
+     Compare an unknown face with the known faces and return the first match.
+     """
+     matches = face_recognition.compare_faces(known_faces["encodings"], unknown_face_encoding, tolerance=0.6)
+     if True in matches:
+         match_index = matches.index(True)
+         return known_faces["names"][match_index]
+     return "Unknown"
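+ # Note: tolerance=0.6 is the face_recognition default; lower values make matching
+ # stricter. compare_faces returns one boolean per known encoding, so the first
+ # True wins here rather than the smallest face distance.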
+
+ def detect_emotion(face_image):
+     """
+     Predict the emotion of a face using the emotion detection model.
+     """
+     face_resized = cv2.resize(face_image, (img_shape, img_shape))
+     face_resized = np.expand_dims(face_resized, axis=0)
+     face_resized = face_resized / 255.0  # Normalize the image
+     predictions = emotion_model.predict(face_resized)
+     return emotion_labels[np.argmax(predictions)]
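+ # Note: the crop is resized to 48x48, given a batch dimension, and scaled to
+ # [0, 1] (presumably matching training); the old code divided by img_shape (48)
+ # instead of 255, which this commit fixes.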
+
+ def process_frame_with_recognition_and_emotion(frame):
+     """
+     Detect faces, recognize names, and detect emotions in the frame.
+     """
      gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
      faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

      for (x, y, w, h) in faces:
+         # Get the face area
+         face_image = rgb_frame[y:y+h, x:x+w]
+         face_encodings = face_recognition.face_encodings(face_image)

+         if face_encodings:
+             name = recognize_face(face_encodings[0])  # Recognize the face
+         else:
+             name = "Unknown"

+         # Predict emotion
+         emotion = detect_emotion(frame[y:y+h, x:x+w])
+
+         # Display name and emotion
+         display_text = f"{name} is Feeling {emotion}"
          cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+         cv2.putText(frame, display_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

      return frame
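+ # Note: face_recognition.face_encodings re-detects faces inside each crop on
+ # every frame, which is slow; encoding only every Nth frame would be a cheap
+ # speedup (not part of this commit).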

  def video_feed(video_source):
+     """
+     Display video feed with face recognition and emotion detection.
+     """
+     frame_placeholder = st.empty()  # Placeholder for updating frames

      while True:
          ret, frame = video_source.read()
          if not ret:
              break

+         frame = process_frame_with_recognition_and_emotion(frame)
          frame_placeholder.image(frame, channels="BGR", use_column_width=True)
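+ # Note: the capture is never released; calling video_source.release() after the
+ # loop would free the camera or file handle.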

+ # Sidebar options
+ upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
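+ # Note: "Camera" opens device 0 on the machine running Streamlit, so it works
+ # locally but not on a remote host such as a hosted Space.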
 
 
  if upload_choice == "Camera":
+     video_source = cv2.VideoCapture(0)  # Access webcam
      video_feed(video_source)

  elif upload_choice == "Upload Video":
      uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
      if uploaded_video:
          with tempfile.NamedTemporaryFile(delete=False) as tfile:
              tfile.write(uploaded_video.read())
          video_source = cv2.VideoCapture(tfile.name)
          video_feed(video_source)

  elif upload_choice == "Upload Image":
+     uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
      if uploaded_image:
          image = Image.open(uploaded_image)
          frame = np.array(image)
+         frame = process_frame_with_recognition_and_emotion(frame)
+         st.image(frame, caption="Processed Image", use_column_width=True)
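+ # Caveat: PIL gives RGB (or RGBA) arrays, while this pipeline assumes BGR, so
+ # the "Upload Image" branch may show swapped colours; converting first with
+ # cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) would align it with the video path.

To try the commit locally, a minimal sketch — assuming CNN_Model_acc_75.h5 sits next to app.py and known_faces/ holds one clear photo per person (face-recognition builds dlib, which can take a while):

    pip install streamlit opencv-python tensorflow face-recognition
    streamlit run app.py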