#!/usr/bin/env python
from __future__ import annotations
import pathlib
import cv2
import gradio as gr
import huggingface_hub
import insightface
import numpy as np
import onnxruntime as ort
from PIL import Image  # Import PIL to handle the cropped images
TITLE = "insightface Person Detection"
DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
def load_model():
    path = huggingface_hub.hf_hub_download("public-data/insightface", "models/scrfd_person_2.5g.onnx")
    options = ort.SessionOptions()
    options.intra_op_num_threads = 8
    options.inter_op_num_threads = 8
    session = ort.InferenceSession(
        path, sess_options=options, providers=["CPUExecutionProvider", "CUDAExecutionProvider"]
    )
    model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, session=session)
    return model
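# Note: ONNX Runtime tries execution providers in the order given above, so with
# CPUExecutionProvider listed first the session runs on CPU even when CUDA is
# present. A quick way to see what the installed build actually supports:
#
#   print(ort.get_available_providers())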
def detect_person(
    img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
) -> np.ndarray:
    bboxes, _ = detector.detect(img)  # keypoints are returned but not used here
    bboxes = np.round(bboxes[:, :4]).astype(int)  # drop the score column, keep x1, y1, x2, y2
    return bboxes
def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
    person_images = []
    h, w = image.shape[:2]
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        # Clamp to the image bounds: detector boxes can extend past the border,
        # and negative indices would wrap around under numpy slicing.
        x1, y1 = max(x1, 0), max(y1, 0)
        x2, y2 = min(x2, w), min(y2, h)
        person_image = image[y1:y2, x1:x2]  # Crop the detected person (BGR)
        # Convert to a PIL image for compatibility with the Gradio gallery;
        # ascontiguousarray guards against stride issues from the reversed-channel view.
        pil_image = Image.fromarray(cv2.cvtColor(np.ascontiguousarray(person_image), cv2.COLOR_BGR2RGB))
        person_images.append(pil_image)
    return person_images
detector = load_model()
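# prepare() fixes the runtime config: in the insightface convention, ctx_id=-1
# selects CPU, nms_thresh is the NMS IoU threshold, and input_size is the
# resolution images are resized to for detection.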
detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
def detect(image: np.ndarray) -> list[Image.Image]:
    image = image[:, :, ::-1]  # RGB -> BGR (Gradio supplies RGB; the detector expects BGR)
    bboxes = detect_person(image, detector)
    person_images = extract_persons(image, bboxes)  # Extract each person as a separate image
    return person_images
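# Minimal local usage sketch (no Gradio), assuming an image exists at the
# hypothetical path "sample.jpg":
#
#   rgb = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)  # detect() expects RGB
#   for i, crop in enumerate(detect(rgb)):
#       crop.save(f"person_{i}.png")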
examples = sorted(pathlib.Path("images").glob("*.jpg"))
demo = gr.Interface(
    fn=detect,
    inputs=gr.Image(label="Input", type="numpy"),
    # Gallery styling moved to constructor kwargs in Gradio 4 (.style() was removed).
    outputs=gr.Gallery(label="Detected Persons", columns=2, height="auto"),  # Display a gallery of cropped images
    examples=examples,
    examples_per_page=30,
    title=TITLE,
    description=DESCRIPTION,
)
if __name__ == "__main__":
    demo.queue(max_size=10).launch()