LovnishVerma committed · verified
Commit 52a6fef · 1 Parent(s): 29be7f9

Update app.py

Files changed (1)
  1. app.py +13 -20
app.py CHANGED
@@ -5,7 +5,7 @@ import time
  import os
  from keras.models import load_model
  from PIL import Image
- import mediapipe as mp
+ import dlib
  import pymongo
  from datetime import datetime
  import tempfile
@@ -52,11 +52,11 @@ def load_known_faces():
          image = cv2.imread(image_path)
          gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
          # Detect face in the image
-         faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml').detectMultiScale(
-             gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
-         )
+         detector = dlib.get_frontal_face_detector()
+         faces = detector(gray)

-         for (x, y, w, h) in faces:
+         for face in faces:
+             x, y, w, h = (face.left(), face.top(), face.width(), face.height())
              roi_gray = gray[y:y+h, x:x+w]
              known_faces.append(roi_gray)
              known_names.append(image_name.split('.')[0])  # Assuming file name is the person's name
@@ -66,28 +66,21 @@ def load_known_faces():

  load_known_faces()

- # Mediapipe face detection
- mp_face_detection = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.8)
+ # Dlib face detector
+ detector = dlib.get_frontal_face_detector()

  # Process a single frame
  def process_frame(frame):
-     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-     results = mp_face_detection.process(rgb_frame)
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     faces = detector(gray)

      result_text = ""  # Initialize result text

-     if results.detections:
-         for detection in results.detections:
-             bboxC = detection.location_data.relative_bounding_box
-             h, w, _ = frame.shape
-             bbox = (
-                 int(bboxC.xmin * w), int(bboxC.ymin * h),
-                 int(bboxC.width * w), int(bboxC.height * h)
-             )
-             x, y, w, h = bbox
-
+     if len(faces) > 0:
+         for face in faces:
+             x, y, w, h = (face.left(), face.top(), face.width(), face.height())
              roi_color = frame[y:y+h, x:x+w]
-             roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_BGR2GRAY)
+             roi_gray = gray[y:y+h, x:x+w]

              # Apply histogram equalization for better feature extraction
              roi_gray = cv2.equalizeHist(roi_gray)
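
For reference, the dlib path the commit moves to can be exercised on its own. The sketch below is a minimal, self-contained approximation of the new detection flow, not the committed app.py: the helper name detect_face_rois, the webcam index, and the clamping of detector rectangles to the frame bounds are illustrative assumptions.

    # Minimal sketch of the dlib-based detection flow (illustrative, not the committed file).
    import cv2
    import dlib

    detector = dlib.get_frontal_face_detector()

    def detect_face_rois(frame):
        """Return histogram-equalized grayscale face crops from a BGR frame."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rois = []
        for face in detector(gray):
            # dlib rectangles can reach past the image border; clamping is an extra
            # safeguard added here, not something the committed code does.
            x1 = max(face.left(), 0)
            y1 = max(face.top(), 0)
            x2 = min(face.right(), gray.shape[1])
            y2 = min(face.bottom(), gray.shape[0])
            if x2 > x1 and y2 > y1:
                rois.append(cv2.equalizeHist(gray[y1:y2, x1:x2]))
        return rois

    if __name__ == "__main__":
        cap = cv2.VideoCapture(0)  # assumed default webcam, for a quick local test only
        ok, frame = cap.read()
        cap.release()
        if ok:
            print(f"Detected {len(detect_face_rois(frame))} face(s)")

One design point visible in the diff: dlib's frontal face detector (HOG + SVM) runs directly on a grayscale image, so process_frame now converts the frame to grayscale once and reuses that array for the ROI, whereas the MediaPipe path needed an RGB copy for detection plus a separate BGR-to-gray conversion of each ROI.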