Saad0KH commited on
Commit
b4aea34
·
verified ·
1 Parent(s): 3a83ac2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -17
app.py CHANGED
@@ -8,6 +8,7 @@ import huggingface_hub
8
  import insightface
9
  import numpy as np
10
  import onnxruntime as ort
 
11
 
12
  TITLE = "insightface Person Detection"
13
  DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
@@ -30,23 +31,17 @@ def detect_person(
30
  ) -> tuple[np.ndarray, np.ndarray]:
31
  bboxes, kpss = detector.detect(img)
32
  bboxes = np.round(bboxes[:, :4]).astype(int)
33
- kpss = np.round(kpss).astype(int)
34
- kpss[:, :, 0] = np.clip(kpss[:, :, 0], 0, img.shape[1])
35
- kpss[:, :, 1] = np.clip(kpss[:, :, 1], 0, img.shape[0])
36
- vbboxes = bboxes.copy()
37
- vbboxes[:, 0] = kpss[:, 0, 0]
38
- vbboxes[:, 1] = kpss[:, 0, 1]
39
- vbboxes[:, 2] = kpss[:, 4, 0]
40
- vbboxes[:, 3] = kpss[:, 4, 1]
41
- return bboxes, vbboxes
42
-
43
-
44
- def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[np.ndarray]:
45
  person_images = []
46
  for bbox in bboxes:
47
  x1, y1, x2, y2 = bbox
48
  person_image = image[y1:y2, x1:x2] # Crop the detected person
49
- person_images.append(person_image)
 
 
50
  return person_images
51
 
52
 
@@ -54,11 +49,11 @@ detector = load_model()
54
  detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
55
 
56
 
57
- def detect(image: np.ndarray) -> list[np.ndarray]:
58
  image = image[:, :, ::-1] # RGB -> BGR
59
- bboxes, vbboxes = detect_person(image, detector)
60
  person_images = extract_persons(image, bboxes) # Extract each person as a separate image
61
- return [person_img[:, :, ::-1] for person_img in person_images] # BGR -> RGB
62
 
63
 
64
  examples = sorted(pathlib.Path("images").glob("*.jpg"))
@@ -66,7 +61,7 @@ examples = sorted(pathlib.Path("images").glob("*.jpg"))
66
  demo = gr.Interface(
67
  fn=detect,
68
  inputs=gr.Image(label="Input", type="numpy"),
69
- outputs=gr.Gallery(label="Detected Persons"), # Display a gallery of cropped images
70
  examples=examples,
71
  examples_per_page=30,
72
  title=TITLE,
 
8
  import insightface
9
  import numpy as np
10
  import onnxruntime as ort
11
+ from PIL import Image # Import PIL to manipulate images
12
 
13
  TITLE = "insightface Person Detection"
14
  DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
 
31
  ) -> tuple[np.ndarray, np.ndarray]:
32
  bboxes, kpss = detector.detect(img)
33
  bboxes = np.round(bboxes[:, :4]).astype(int)
34
+ return bboxes
35
+
36
+
37
+ def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
 
 
 
 
 
 
 
 
38
  person_images = []
39
  for bbox in bboxes:
40
  x1, y1, x2, y2 = bbox
41
  person_image = image[y1:y2, x1:x2] # Crop the detected person
42
+ # Convert to a PIL image to ensure compatibility
43
+ pil_image = Image.fromarray(cv2.cvtColor(person_image, cv2.COLOR_BGR2RGB))
44
+ person_images.append(pil_image)
45
  return person_images
46
 
47
 
 
49
  detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
50
 
51
 
52
+ def detect(image: np.ndarray) -> list[Image.Image]:
53
  image = image[:, :, ::-1] # RGB -> BGR
54
+ bboxes = detect_person(image, detector)
55
  person_images = extract_persons(image, bboxes) # Extract each person as a separate image
56
+ return person_images
57
 
58
 
59
  examples = sorted(pathlib.Path("images").glob("*.jpg"))
 
61
  demo = gr.Interface(
62
  fn=detect,
63
  inputs=gr.Image(label="Input", type="numpy"),
64
+ outputs=gr.Gallery(label="Detected Persons").style(grid=2, height="auto"), # Display a gallery of cropped images
65
  examples=examples,
66
  examples_per_page=30,
67
  title=TITLE,