Saad0KH committed on
Commit
70eacb3
·
verified ·
1 Parent(s): 1678586

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -11
app.py CHANGED
@@ -9,7 +9,6 @@ import insightface
9
  import numpy as np
10
  import onnxruntime as ort
11
  from PIL import Image
12
- import io
13
 
14
  TITLE = "insightface Person Detection"
15
  DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
@@ -21,7 +20,7 @@ def load_model():
21
  options.intra_op_num_threads = 8
22
  options.inter_op_num_threads = 8
23
  session = ort.InferenceSession(
24
- path, sess_options=options, providers=["CPUExecutionProvider"]
25
  )
26
  model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, session=session)
27
  return model
@@ -29,10 +28,37 @@ def load_model():
29
 
30
  def detect_person(
31
  img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
32
- ) -> np.ndarray:
33
- bboxes, _ = detector.detect(img)
34
  bboxes = np.round(bboxes[:, :4]).astype(int)
35
- return bboxes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
 
38
  def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[np.ndarray]:
@@ -53,11 +79,15 @@ detector = load_model()
53
  detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
54
 
55
 
56
- def detect(image: np.ndarray) -> list[Image.Image]:
57
  image = image[:, :, ::-1] # RGB -> BGR
58
- bboxes = detect_person(image, detector)
59
- person_images = extract_persons(image, bboxes) # Extract each person as a separate image
60
- return [convert_to_pil_image(person_img[:, :, ::-1]) for person_img in person_images] # BGR -> RGB
 
 
 
 
61
 
62
 
63
  examples = sorted(pathlib.Path("images").glob("*.jpg"))
@@ -65,9 +95,8 @@ examples = sorted(pathlib.Path("images").glob("*.jpg"))
65
  demo = gr.Interface(
66
  fn=detect,
67
  inputs=gr.Image(label="Input", type="numpy"),
68
- outputs=gr.Gallery(label="Detected Persons"), # Display multiple images in a gallery
69
  examples=examples,
70
- cache_examples=False, # Disable caching of examples
71
  examples_per_page=30,
72
  title=TITLE,
73
  description=DESCRIPTION,
 
9
  import numpy as np
10
  import onnxruntime as ort
11
  from PIL import Image
 
12
 
13
  TITLE = "insightface Person Detection"
14
  DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
 
20
  options.intra_op_num_threads = 8
21
  options.inter_op_num_threads = 8
22
  session = ort.InferenceSession(
23
+ path, sess_options=options, providers=["CPUExecutionProvider", "CUDAExecutionProvider"]
24
  )
25
  model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, session=session)
26
  return model
 
28
 
29
def detect_person(
    img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
) -> tuple[np.ndarray, np.ndarray]:
    """Run person detection and return (bboxes, vbboxes) as integer arrays.

    ``bboxes`` are the detector's boxes rounded to ints; ``vbboxes`` are
    "visible" boxes rebuilt from two of the predicted keypoints.
    """
    boxes, keypoints = detector.detect(img)
    boxes = np.round(boxes[:, :4]).astype(int)
    keypoints = np.round(keypoints).astype(int)
    # Clamp keypoint coordinates to the image frame (x to width, y to height).
    height, width = img.shape[:2]
    keypoints[:, :, 0] = np.clip(keypoints[:, :, 0], 0, width)
    keypoints[:, :, 1] = np.clip(keypoints[:, :, 1], 0, height)
    # Visible box: top-left taken from keypoint 0, bottom-right from keypoint 4
    # (presumably the model's first/last landmark of the person — TODO confirm
    # against the insightface person-detection example).
    visible = boxes.copy()
    visible[:, 0] = keypoints[:, 0, 0]
    visible[:, 1] = keypoints[:, 0, 1]
    visible[:, 2] = keypoints[:, 4, 0]
    visible[:, 3] = keypoints[:, 4, 1]
    return boxes, visible
43
+
44
+
45
def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.ndarray:
    """Return a copy of ``image`` with detection boxes drawn on it.

    Each detection box is outlined in green; the matching visible box is
    alpha-shaded with color (255, 0, 0) and its four corners are dotted.
    """
    canvas = image.copy()
    alpha = 0.8
    shade = (255, 0, 0)
    for box, vbox in zip(bboxes, vbboxes):
        x1, y1, x2, y2 = box
        vx1, vy1, vx2, vy2 = vbox
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 1)
        # Blend the visible region toward ``shade`` channel by channel; the
        # float result is cast back on assignment, exactly as before.
        for ch in range(3):
            canvas[vy1:vy2, vx1:vx2, ch] = (
                canvas[vy1:vy2, vx1:vx2, ch] * alpha + shade[ch] * (1.0 - alpha)
            )
        # Mark all four corners of the visible box.
        for cx, cy in ((vx1, vy1), (vx1, vy2), (vx2, vy1), (vx2, vy2)):
            cv2.circle(canvas, (cx, cy), 1, shade, 2)
    return canvas
62
 
63
 
64
  def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[np.ndarray]:
 
79
  detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
80
 
81
 
82
def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
    """Detect persons in an RGB image.

    Returns the annotated full image plus one cropped image per detected
    person, all as PIL images in RGB order.
    """
    bgr = image[:, :, ::-1]  # the model works in BGR; the UI delivers RGB
    boxes, visible_boxes = detect_person(bgr, detector)
    annotated = visualize(bgr, boxes, visible_boxes)
    crops = extract_persons(bgr, boxes)
    # Flip channel order back to RGB before wrapping in PIL images.
    annotated_pil = convert_to_pil_image(annotated[:, :, ::-1])
    crop_pils = [convert_to_pil_image(crop[:, :, ::-1]) for crop in crops]
    return annotated_pil, crop_pils
91
 
92
 
93
  examples = sorted(pathlib.Path("images").glob("*.jpg"))
 
95
  demo = gr.Interface(
96
  fn=detect,
97
  inputs=gr.Image(label="Input", type="numpy"),
98
+ outputs=[gr.Image(label="Processed Image"), gr.Gallery(label="Detected Persons")],
99
  examples=examples,
 
100
  examples_per_page=30,
101
  title=TITLE,
102
  description=DESCRIPTION,