Futuretop committed (verified)
Commit 74460b6 · 1 Parent(s): 6ae5ff2

Update app.py

Files changed (1)
  1. app.py +8 -16
app.py CHANGED
@@ -1,7 +1,5 @@
  from PIL import Image
  from transformers import BlipProcessor, BlipForConditionalGeneration
- import torch
- import cv2
  import numpy as np
  from deepface import DeepFace
  import gradio as gr
@@ -33,21 +31,15 @@ def analyze_image(image_pil):
      out = model.generate(**inputs)
      caption = processor.decode(out[0], skip_special_tokens=True)

-     # Face detection (Improved parameters + correct color conversion)
-     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
-     gray = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2GRAY)  # Ensures color format is correct
-
-     # 🔍 More sensitive detection parameters
-     faces = face_cascade.detectMultiScale(
-         gray,
-         scaleFactor=1.05,   # increased sensitivity
-         minNeighbors=3,     # lower neighbor count = more detections
-         minSize=(30, 30)    # ensure even smaller faces are caught
-     )
+     # Face detection using DeepFace
+     try:
+         faces = DeepFace.extract_faces(img_path=image_pil, detector_backend="retinaface", enforce_detection=False)
+     except Exception:
+         faces = []

      face_infos = []
-     for (x, y, w, h) in faces:
-         face_crop = image_np[y:y+h, x:x+w]
+     for face_data in faces:
+         face_crop = face_data["face"]
          try:
              analysis = DeepFace.analyze(face_crop, actions=['age', 'gender', 'emotion'], enforce_detection=False)
              age = analysis[0]['age']
@@ -93,7 +85,7 @@ def analyze_image(image_pil):
      # Generate 15 sentences
      sentences = []
      sentences.append(f"According to the BLIP model, the scene can be described as: \"{caption}\".")
-     sentences.append(f"The image contains {num_faces} visible face(s) detected by OpenCV.")
+     sentences.append(f"The image contains {num_faces} visible face(s) detected using DeepFace (RetinaFace backend).")

      gender_desc = []
      if gender_counts["Man"] > 0:
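
For context on the diff above: the commit drops the OpenCV Haar-cascade detector and routes face detection through DeepFace's extract_faces with the RetinaFace backend, so detection and attribute analysis now both use deepface. Below is a minimal, self-contained sketch of that new path, not the repository's exact code: the function name detect_and_describe_faces is illustrative, the PIL image is converted to a NumPy array before the call (the committed code passes the PIL image directly), and the uint8 rescale plus the dominant_gender / dominant_emotion result keys are assumptions based on recent deepface releases.

import numpy as np
from PIL import Image
from deepface import DeepFace

def detect_and_describe_faces(image_pil: Image.Image):
    """Detect faces with RetinaFace and estimate age/gender/emotion per face."""
    # enforce_detection=False keeps extract_faces from raising when no face is found.
    try:
        faces = DeepFace.extract_faces(
            img_path=np.array(image_pil),   # NumPy array input; the commit passes the PIL image directly
            detector_backend="retinaface",
            enforce_detection=False,
        )
    except Exception:
        faces = []

    face_infos = []
    for face_data in faces:
        face_crop = face_data["face"]       # extract_faces usually returns floats scaled to [0, 1]
        face_crop = (face_crop * 255).astype("uint8")  # rescale so analyze sees ordinary pixel values (assumption)
        try:
            analysis = DeepFace.analyze(face_crop, actions=['age', 'gender', 'emotion'], enforce_detection=False)
            face_infos.append({
                "age": analysis[0]["age"],
                "gender": analysis[0]["dominant_gender"],    # assumed result keys for recent deepface versions
                "emotion": analysis[0]["dominant_emotion"],
            })
        except Exception:
            continue
    return face_infos

In the app's flow, num_faces would then be len(face_infos), which is what feeds the "visible face(s)" sentence changed in the last hunk of the diff.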