LovnishVerma committed on
Commit c4479c0 · verified · 1 Parent(s): 34e6362

Update app.py

Files changed (1)
  1. app.py +114 -18
app.py CHANGED
@@ -3,23 +3,119 @@ import cv2
  import numpy as np
  import streamlit as st
  from datetime import datetime

  # Directories
- KNOWN_FACES_DIR = "./known_faces"  # Ensure this path is writable on your local machine
-
- # Ensure the known faces directory exists
- os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
-
- if capture_mode == "Upload Image(s)":
-     uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
-     if uploaded_files:
-         for uploaded_file in uploaded_files:
-             img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
-             gray, faces = detect_faces(img)
-             for (x, y, w, h) in faces:
-                 face_img = gray[y:y+h, x:x+w]
-                 resized_img = cv2.resize(face_img, IMG_SIZE)
-                 timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
-                 filepath = os.path.join(KNOWN_FACES_DIR, f"{timestamp}.jpg")
-                 cv2.imwrite(filepath, resized_img)
-         st.success(f"Faces registered successfully for {person_name}!")

  import numpy as np
  import streamlit as st
  from datetime import datetime
+ from tensorflow.keras.models import load_model

  # Directories
+ KNOWN_FACES_DIR = "known_faces"
+ EMOTION_MODEL_PATH = "CNN_Model_acc_75.h5"
+ CASCADE_PATH = "haarcascade_frontalface_default.xml"
+
+ # Ensure the known faces directory exists before it is scanned or written to
+ os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
+
+ # Constants
+ IMG_SIZE = (200, 200)
+
+ # Load models
+ emotion_model = load_model(EMOTION_MODEL_PATH)
+ face_cascade = cv2.CascadeClassifier(CASCADE_PATH)
+ face_recognizer = cv2.face.LBPHFaceRecognizer_create()
+
+ # Helper Functions
+ def load_emotion_labels():
+     return ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
+
+ def train_recognizer():
+     faces = []
+     labels = []
+     label_map = {}
+     for idx, person_name in enumerate(os.listdir(KNOWN_FACES_DIR)):
+         person_path = os.path.join(KNOWN_FACES_DIR, person_name)
+         if not os.path.isdir(person_path):
+             continue
+         label_map[idx] = person_name
+         for filename in os.listdir(person_path):
+             filepath = os.path.join(person_path, filename)
+             if filepath.lower().endswith(('.jpg', '.jpeg', '.png')):
+                 img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
+                 if img is not None:
+                     faces.append(img)
+                     labels.append(idx)
+     if len(faces) == 0:
+         st.warning("No valid training data found. Add faces first.")
+         return {}
+     face_recognizer.train(faces, np.array(labels))
+     # Keep the numeric label -> name mapping so predictions can be shown by name
+     return label_map
+
+ def detect_faces(image):
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
+     return gray, faces
+
+ def detect_emotions(face_img):
+     resized_face = cv2.resize(face_img, (48, 48))
+     normalized_face = resized_face / 255.0
+     reshaped_face = np.expand_dims(normalized_face, axis=(0, -1))
+     emotion_probabilities = emotion_model.predict(reshaped_face)
+     emotion_idx = np.argmax(emotion_probabilities)
+     return load_emotion_labels()[emotion_idx]
+
+ # Streamlit App
+ st.title("Face Recognition and Emotion Detection")
+ st.sidebar.title("Options")
+ option = st.sidebar.selectbox("Choose an action", ["Home", "Register New Face", "Recognize Faces"])
+
+ # Train the recognizer initially
+ if option != "Register New Face":
+     label_map = train_recognizer()
+
+ if option == "Home":
+     st.write("Use the sidebar to register new faces or recognize them.")
+
+ elif option == "Register New Face":
+     person_name = st.text_input("Enter the person's name")
+     capture_mode = st.radio("Select input method", ["Use Camera", "Upload Image(s)"])
+
+     if person_name and st.button("Register Face"):
+         person_dir = os.path.join(KNOWN_FACES_DIR, person_name)
+         os.makedirs(person_dir, exist_ok=True)
+
+         if capture_mode == "Use Camera":
+             st.warning("Ensure you are running this locally to access the camera.")
+             # Camera logic (only available locally)
+             cap = cv2.VideoCapture(0)
+             if not cap.isOpened():
+                 st.error("Could not access the camera. Make sure it's connected and permissions are granted.")
+             else:
+                 # Capture a frame from the camera
+                 ret, frame = cap.read()
+                 if ret:
+                     st.image(frame, channels="BGR")
+                 cap.release()
+
+         elif capture_mode == "Upload Image(s)":
+             uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
+             if uploaded_files:
+                 for uploaded_file in uploaded_files:
+                     img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
+                     gray, faces = detect_faces(img)
+                     for (x, y, w, h) in faces:
+                         face_img = gray[y:y+h, x:x+w]
+                         resized_img = cv2.resize(face_img, IMG_SIZE)
+                         timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+                         filepath = os.path.join(person_dir, f"{timestamp}.jpg")
+                         cv2.imwrite(filepath, resized_img)
+                 st.success(f"Faces registered successfully for {person_name}!")
+                 label_map = train_recognizer()
+
+ elif option == "Recognize Faces":
+     uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+     if uploaded_file:
+         img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
+         gray, faces = detect_faces(img)
+         for (x, y, w, h) in faces:
+             face_img = gray[y:y+h, x:x+w]
+             resized_img = cv2.resize(face_img, IMG_SIZE)
+             label, confidence = face_recognizer.predict(resized_img)
+             name = label_map.get(label, "Unknown")
+             emotion = detect_emotions(face_img)
+             cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
+             cv2.putText(img, f"{name}, {emotion}", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+         st.image(img, channels="BGR")
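
For reference (not part of the commit), a minimal standalone sketch of the preprocessing that detect_emotions() applies before calling the Keras model: a 48x48 grayscale crop scaled to [0, 1] and reshaped to (1, 48, 48, 1). It assumes the CNN_Model_acc_75.h5 file referenced above is present locally and expects that input shape with the seven labels from load_emotion_labels(); face.jpg is a hypothetical, already-cropped test image.

import cv2
import numpy as np
from tensorflow.keras.models import load_model

EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

# Emotion model referenced in app.py (assumed: 48x48 grayscale input, 7 output classes)
model = load_model("CNN_Model_acc_75.h5")

# face.jpg is a hypothetical pre-cropped face image used only for this check
face = cv2.imread("face.jpg", cv2.IMREAD_GRAYSCALE)

resized = cv2.resize(face, (48, 48))                    # match the assumed model input size
batch = np.expand_dims(resized / 255.0, axis=(0, -1))   # shape becomes (1, 48, 48, 1)
probs = model.predict(batch)
print(EMOTION_LABELS[int(np.argmax(probs))])

Running this outside Streamlit is a quick way to confirm the model file and its expected input shape before launching the app with streamlit run app.py.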