Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -8,6 +8,7 @@ import huggingface_hub
|
|
8 |
import insightface
|
9 |
import numpy as np
|
10 |
import onnxruntime as ort
|
|
|
11 |
|
12 |
TITLE = "insightface Person Detection"
|
13 |
DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
|
@@ -30,23 +31,17 @@ def detect_person(
|
|
30 |
) -> tuple[np.ndarray, np.ndarray]:
|
31 |
bboxes, kpss = detector.detect(img)
|
32 |
bboxes = np.round(bboxes[:, :4]).astype(int)
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
vbboxes[:, 0] = kpss[:, 0, 0]
|
38 |
-
vbboxes[:, 1] = kpss[:, 0, 1]
|
39 |
-
vbboxes[:, 2] = kpss[:, 4, 0]
|
40 |
-
vbboxes[:, 3] = kpss[:, 4, 1]
|
41 |
-
return bboxes, vbboxes
|
42 |
-
|
43 |
-
|
44 |
-
def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[np.ndarray]:
|
45 |
person_images = []
|
46 |
for bbox in bboxes:
|
47 |
x1, y1, x2, y2 = bbox
|
48 |
person_image = image[y1:y2, x1:x2] # Crop the detected person
|
49 |
-
|
|
|
|
|
50 |
return person_images
|
51 |
|
52 |
|
@@ -54,11 +49,11 @@ detector = load_model()
|
|
54 |
detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
|
55 |
|
56 |
|
57 |
-
def detect(image: np.ndarray) -> list[
|
58 |
image = image[:, :, ::-1] # RGB -> BGR
|
59 |
-
bboxes
|
60 |
person_images = extract_persons(image, bboxes) # Extract each person as a separate image
|
61 |
-
return
|
62 |
|
63 |
|
64 |
examples = sorted(pathlib.Path("images").glob("*.jpg"))
|
@@ -66,7 +61,7 @@ examples = sorted(pathlib.Path("images").glob("*.jpg"))
|
|
66 |
demo = gr.Interface(
|
67 |
fn=detect,
|
68 |
inputs=gr.Image(label="Input", type="numpy"),
|
69 |
-
outputs=gr.Gallery(label="Detected Persons"), # Display a gallery of cropped images
|
70 |
examples=examples,
|
71 |
examples_per_page=30,
|
72 |
title=TITLE,
|
|
|
8 |
import insightface
|
9 |
import numpy as np
|
10 |
import onnxruntime as ort
|
11 |
+
from PIL import Image # Importer PIL pour manipuler les images
|
12 |
|
13 |
TITLE = "insightface Person Detection"
|
14 |
DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
|
|
|
31 |
) -> tuple[np.ndarray, np.ndarray]:
|
32 |
bboxes, kpss = detector.detect(img)
|
33 |
bboxes = np.round(bboxes[:, :4]).astype(int)
|
34 |
+
return bboxes
|
35 |
+
|
36 |
+
|
37 |
+
def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
    """Crop each detected person out of a BGR image.

    Args:
        image: Full frame as a BGR ``np.ndarray`` (H, W, 3) — the same color
            order the detector was fed.
        bboxes: Integer array of shape (N, 4) with ``x1, y1, x2, y2`` corners.

    Returns:
        One RGB ``PIL.Image`` per bounding box, in input order. Empty list
        when ``bboxes`` is empty.
    """
    person_images = []
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        person_image = image[y1:y2, x1:x2]  # Crop the detected person (BGR)
        # BGR -> RGB via a NumPy channel flip; the original called
        # cv2.cvtColor, but cv2 is never imported in this file, which would
        # raise NameError at runtime. The slice is equivalent for 3-channel
        # images. np.ascontiguousarray because Image.fromarray needs a
        # C-contiguous buffer after the negative-stride flip.
        rgb = np.ascontiguousarray(person_image[:, :, ::-1])
        # Convert to a PIL image so the Gradio gallery can display it.
        person_images.append(Image.fromarray(rgb))
    return person_images
|
46 |
|
47 |
|
|
|
49 |
detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
|
50 |
|
51 |
|
52 |
+
def detect(image: np.ndarray) -> list[Image.Image]:
    """Run person detection on an RGB frame and return one crop per person.

    Args:
        image: Input frame as an RGB ``np.ndarray`` (as delivered by
            ``gr.Image(type="numpy")``).

    Returns:
        A list of cropped person images suitable for a Gradio gallery.
    """
    bgr = image[:, :, ::-1]  # RGB -> BGR (the detector expects BGR input)
    bboxes = detect_person(bgr, detector)
    # Extract each detected person as a separate image.
    return extract_persons(bgr, bboxes)
|
57 |
|
58 |
|
59 |
examples = sorted(pathlib.Path("images").glob("*.jpg"))
|
|
|
61 |
demo = gr.Interface(
|
62 |
fn=detect,
|
63 |
inputs=gr.Image(label="Input", type="numpy"),
|
64 |
+
outputs=gr.Gallery(label="Detected Persons").style(grid=2, height="auto"), # Display a gallery of cropped images
|
65 |
examples=examples,
|
66 |
examples_per_page=30,
|
67 |
title=TITLE,
|