Saad0KH committed
Commit c034823 (verified) · Parent(s): c4af530

Update app.py

Files changed (1)
app.py: +9 -1
app.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 import pathlib
+import io
 import cv2
 import gradio as gr
 import huggingface_hub
@@ -11,6 +12,7 @@ from PIL import Image
 TITLE = "insightface Person Detection"
 DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
 
+
 def load_model():
     path = huggingface_hub.hf_hub_download("public-data/insightface", "models/scrfd_person_2.5g.onnx")
     options = ort.SessionOptions()
@@ -22,6 +24,7 @@ def load_model():
     model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, session=session)
     return model
 
+
 def detect_person(
     img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
 ) -> tuple[np.ndarray, np.ndarray]:
@@ -37,6 +40,7 @@ def detect_person(
     vbboxes[:, 3] = kpss[:, 4, 1]
     return bboxes, vbboxes
 
+
 def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.ndarray:
     res = image.copy()
     for i in range(bboxes.shape[0]):
@@ -55,18 +59,21 @@ def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.ndarray:
         cv2.circle(res, (vx2, vy2), 1, color, 2)
     return res
 
+
 def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
     person_images = []
     for bbox in bboxes:
         x1, y1, x2, y2 = bbox
         person_image = image[y1:y2, x1:x2]  # Crop the detected person
-        person_pil_image = Image.fromarray(person_image)
+        person_pil_image = Image.fromarray(person_image).convert('RGB')  # Convert to RGB
         person_images.append(person_pil_image)
     return person_images
 
+
 detector = load_model()
 detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
 
+
 def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
     image = image[:, :, ::-1]  # RGB -> BGR
     bboxes, vbboxes = detect_person(image, detector)
@@ -74,6 +81,7 @@ def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
     person_images = extract_persons(res, bboxes)
     return Image.fromarray(res[:, :, ::-1], 'RGB'), person_images  # BGR -> RGB
 
+
 examples = sorted(pathlib.Path("images").glob("*.jpg"))
 
 demo = gr.Interface(
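
For reference, a minimal standalone sketch (not part of the commit) of how the updated extract_persons behaves: the function body is copied as it reads after this change, while the dummy frame and bounding boxes below are hypothetical and stand in for the detector output.

from __future__ import annotations

import numpy as np
from PIL import Image


def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
    person_images = []
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        person_image = image[y1:y2, x1:x2]  # Crop the detected person
        person_pil_image = Image.fromarray(person_image).convert('RGB')  # Convert to RGB
        person_images.append(person_pil_image)
    return person_images


frame = np.zeros((480, 640, 3), dtype=np.uint8)              # dummy BGR frame, as produced in detect()
boxes = np.array([[10, 20, 110, 220], [300, 50, 400, 300]])  # hypothetical person boxes (x1, y1, x2, y2)
crops = extract_persons(frame, boxes)
print([im.size for im in crops])                             # [(100, 200), (100, 250)]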