Saad0KH committed
Commit 1a75096 · verified · 1 Parent(s): 061dadc

Update app.py

Files changed (1): app.py +71 -45
app.py CHANGED
@@ -1,60 +1,86 @@
-from PIL import Image
-import numpy as np
-import io
 import gradio as gr

-def encode_pil_to_bytes(pil_image: Image.Image, format: str, **params) -> bytes:
-    with io.BytesIO() as output_bytes:
-        pil_image.save(output_bytes, format=format, **params)
-        output_bytes.seek(0)  # Rewind the BytesIO object to the beginning
-        return output_bytes.read()

-def save_pil_to_cache(pil_image: Image.Image, format: str) -> bytes:
-    return encode_pil_to_bytes(pil_image, format)

-def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
-    person_images = []
-    for bbox in bboxes:
         x1, y1, x2, y2 = bbox
-        person_image = image[y1:y2, x1:x2]  # Crop the detected person
-        person_pil_image = Image.fromarray(person_image).convert('RGB')  # Convert to RGB
-        person_images.append(person_pil_image)
-    return person_images

-def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
     if image is None:
-        return None, []

     image = image[:, :, ::-1]  # RGB -> BGR
     bboxes, vbboxes = detect_person(image, detector)
     res = visualize(image, bboxes, vbboxes)
-    person_images = extract_persons(res, bboxes)
-    processed_image = Image.fromarray(res[:, :, ::-1], 'RGB')  # BGR -> RGB
-    return processed_image, person_images
-
-def process_image(image: Image.Image) -> tuple[Image.Image, list[Image.Image]]:
-    try:
-        np_image = np.array(image)
-        processed_image, person_images = detect(np_image)
-        return processed_image, person_images
-    except Exception as e:
-        print(f"An error occurred: {e}")
-        return None, []
-
-def build_gradio_interface():
-    with gr.Blocks() as demo:
-        gr.Markdown("## Person Detection App")
-
-        with gr.Row():
-            input_image = gr.Image(type="pil", label="Upload an Image")
-            output_image = gr.Image(type="pil", label="Processed Image")
-            gallery = gr.Gallery(label="Detected Persons")

-        input_image.change(fn=process_image, inputs=input_image, outputs=[output_image, gallery])

-    return demo

-# Example usage
 if __name__ == "__main__":
-    demo = build_gradio_interface()
-    demo.launch()
 
+#!/usr/bin/env python
+
+from __future__ import annotations
+
+import pathlib
+
+import cv2
 import gradio as gr
+import huggingface_hub
+import insightface
+import numpy as np
+import onnxruntime as ort

+TITLE = "insightface Person Detection"
+DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"

+def load_model():
+    path = huggingface_hub.hf_hub_download("public-data/insightface", "models/scrfd_person_2.5g.onnx")
+    options = ort.SessionOptions()
+    options.intra_op_num_threads = 8
+    options.inter_op_num_threads = 8
+    session = ort.InferenceSession(
+        path, sess_options=options, providers=["CPUExecutionProvider", "CUDAExecutionProvider"]
+    )
+    model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, session=session)
+    return model

+def detect_person(
+    img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
+) -> tuple[np.ndarray, np.ndarray]:
+    bboxes, kpss = detector.detect(img)
+    bboxes = np.round(bboxes[:, :4]).astype(int)
+    kpss = np.round(kpss).astype(int)
+    kpss[:, :, 0] = np.clip(kpss[:, :, 0], 0, img.shape[1])
+    kpss[:, :, 1] = np.clip(kpss[:, :, 1], 0, img.shape[0])
+    vbboxes = bboxes.copy()
+    vbboxes[:, 0] = kpss[:, 0, 0]
+    vbboxes[:, 1] = kpss[:, 0, 1]
+    vbboxes[:, 2] = kpss[:, 4, 0]
+    vbboxes[:, 3] = kpss[:, 4, 1]
+    return bboxes, vbboxes
+
+def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.ndarray:
+    res = image.copy()
+    for i in range(bboxes.shape[0]):
+        bbox = bboxes[i]
+        vbbox = vbboxes[i]
         x1, y1, x2, y2 = bbox
+        vx1, vy1, vx2, vy2 = vbbox
+        cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255, 0), 1)
+        alpha = 0.8
+        color = (255, 0, 0)
+        for c in range(3):
+            res[vy1:vy2, vx1:vx2, c] = res[vy1:vy2, vx1:vx2, c] * alpha + color[c] * (1.0 - alpha)
+        cv2.circle(res, (vx1, vy1), 1, color, 2)
+        cv2.circle(res, (vx1, vy2), 1, color, 2)
+        cv2.circle(res, (vx2, vy1), 1, color, 2)
+        cv2.circle(res, (vx2, vy2), 1, color, 2)
+    return res
+
+detector = load_model()
+detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))

+def detect(image: np.ndarray) -> np.ndarray:
     if image is None:
+        return np.array([])  # Return an empty image if no image is provided

     image = image[:, :, ::-1]  # RGB -> BGR
     bboxes, vbboxes = detect_person(image, detector)
     res = visualize(image, bboxes, vbboxes)
+    return res[:, :, ::-1]  # BGR -> RGB

+examples = sorted(pathlib.Path("images").glob("*.jpg"))

+demo = gr.Interface(
+    fn=detect,
+    inputs=gr.Image(label="Input", type="numpy"),
+    outputs=gr.Image(label="Output"),
+    examples=examples,
+    examples_per_page=30,
+    title=TITLE,
+    description=DESCRIPTION,
+)

 if __name__ == "__main__":
+    demo.queue(max_size=10).launch()
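
For reference, the updated app.py exposes `detect` at module level, so it can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming app.py is importable, the `images/` folder referenced by the examples list contains at least one .jpg, and `output.jpg` is a hypothetical output filename (note that importing app downloads the ONNX model and prepares the detector as a side effect):

import pathlib

import cv2
import numpy as np

from app import detect  # importing app loads and prepares the detector

# Pick any example image shipped with the Space (assumption: images/*.jpg exists).
sample = next(pathlib.Path("images").glob("*.jpg"), None)
if sample is not None:
    bgr = cv2.imread(str(sample))  # OpenCV reads images as BGR
    rgb = bgr[:, :, ::-1]          # detect() expects RGB, as Gradio supplies
    out = detect(rgb)              # RGB result with boxes and overlays drawn
    # imwrite expects BGR; copy to make the reversed view contiguous.
    cv2.imwrite("output.jpg", np.ascontiguousarray(out[:, :, ::-1]))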