Saad0KH committed on
Commit
061dadc
·
verified ·
1 Parent(s): 6801b2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -75
app.py CHANGED
@@ -1,63 +1,16 @@
1
- from __future__ import annotations
 
2
  import io
3
- import cv2
4
  import gradio as gr
5
- import huggingface_hub
6
- import insightface
7
- import numpy as np
8
- import onnxruntime as ort
9
- from PIL import Image
10
-
11
- TITLE = "insightface Person Detection"
12
- DESCRIPTION = "https://github.com/deepinsight/insightface/tree/master/examples/person_detection"
13
 
 
 
 
 
 
14
 
15
def load_model():
    """Fetch the SCRFD person-detection ONNX model and wrap it in a RetinaFace detector."""
    model_path = huggingface_hub.hf_hub_download(
        "public-data/insightface", "models/scrfd_person_2.5g.onnx"
    )
    # Pin both thread pools so CPU inference behaves predictably on shared hosts.
    sess_options = ort.SessionOptions()
    sess_options.intra_op_num_threads = 8
    sess_options.inter_op_num_threads = 8
    session = ort.InferenceSession(
        model_path,
        sess_options=sess_options,
        providers=["CPUExecutionProvider"],
    )
    return insightface.model_zoo.retinaface.RetinaFace(model_file=model_path, session=session)
25
-
26
-
27
def detect_person(
    img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
) -> tuple[np.ndarray, np.ndarray]:
    """Run *detector* on *img* and return integer pixel boxes.

    Returns (bboxes, vbboxes): full-body boxes and "visible" boxes whose
    corners are the first and fifth detected keypoints, clipped to the image.
    """
    raw_boxes, raw_kps = detector.detect(img)
    # Drop the score column and snap coordinates to integer pixels.
    bboxes = np.round(raw_boxes[:, :4]).astype(int)
    keypoints = np.round(raw_kps).astype(int)
    # Clip x against the width and y against the height so crops stay in-bounds.
    height, width = img.shape[0], img.shape[1]
    keypoints[:, :, 0] = np.clip(keypoints[:, :, 0], 0, width)
    keypoints[:, :, 1] = np.clip(keypoints[:, :, 1], 0, height)
    # Visible box: keypoint 0 gives the top-left corner, keypoint 4 the bottom-right.
    vbboxes = bboxes.copy()
    vbboxes[:, 0], vbboxes[:, 1] = keypoints[:, 0, 0], keypoints[:, 0, 1]
    vbboxes[:, 2], vbboxes[:, 3] = keypoints[:, 4, 0], keypoints[:, 4, 1]
    return bboxes, vbboxes
41
-
42
-
43
def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.ndarray:
    """Return a copy of *image* with detection boxes drawn and visible regions shaded."""
    res = image.copy()
    alpha = 0.8
    color = (255, 0, 0)
    for bbox, vbbox in zip(bboxes, vbboxes):
        x1, y1, x2, y2 = bbox
        vx1, vy1, vx2, vy2 = vbbox
        # Full-body box in green, 1px outline.
        cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255, 0), 1)
        # Blend the visible region toward `color`, one channel at a time.
        for c in range(3):
            res[vy1:vy2, vx1:vx2, c] = res[vy1:vy2, vx1:vx2, c] * alpha + color[c] * (1.0 - alpha)
        # Dot each of the four corners of the visible box.
        for corner_x in (vx1, vx2):
            for corner_y in (vy1, vy2):
                cv2.circle(res, (corner_x, corner_y), 1, color, 2)
    return res
60
-
61
 
62
  def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
63
  person_images = []
@@ -65,18 +18,9 @@ def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
65
  x1, y1, x2, y2 = bbox
66
  person_image = image[y1:y2, x1:x2] # Crop the detected person
67
  person_pil_image = Image.fromarray(person_image).convert('RGB') # Convert to RGB
68
- with io.BytesIO() as output:
69
- person_pil_image.save(output, format='PNG') # Save as PNG
70
- output.seek(0) # Move to the start of the BytesIO buffer
71
- person_pil_image = Image.open(output) # Reopen to ensure format
72
- person_images.append(person_pil_image)
73
  return person_images
74
 
75
-
76
# Build the person detector once at import time; prepare it for CPU
# (ctx_id=-1) with NMS threshold 0.5 and a fixed 640x640 input size.
detector = load_model()
detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
78
-
79
-
80
  def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
81
  if image is None:
82
  return None, []
@@ -85,16 +29,32 @@ def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
85
  bboxes, vbboxes = detect_person(image, detector)
86
  res = visualize(image, bboxes, vbboxes)
87
  person_images = extract_persons(res, bboxes)
88
- return Image.fromarray(res[:, :, ::-1], 'RGB'), person_images # BGR -> RGB
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
 
90
 
91
# Single-function Gradio UI: numpy image in, annotated image plus a
# gallery of cropped persons out.
demo = gr.Interface(
    fn=detect,
    inputs=gr.Image(label="Input", type="numpy"),
    outputs=[gr.Image(label="Processed Image", type="numpy"), gr.Gallery(label="Detected Persons", type="numpy")],
    title=TITLE,
    description=DESCRIPTION,
)


if __name__ == "__main__":
    # Queue up to 10 pending requests before rejecting new ones.
    demo.queue(max_size=10).launch()
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
  import io
 
4
  import gradio as gr
 
 
 
 
 
 
 
 
5
 
6
def encode_pil_to_bytes(pil_image: Image.Image, format: str, **params) -> bytes:
    """Serialize *pil_image* into an in-memory byte string.

    *format* and any extra *params* are forwarded to ``PIL.Image.save``.
    """
    buffer = io.BytesIO()
    pil_image.save(buffer, format=format, **params)
    # getvalue() returns the whole buffer regardless of cursor position,
    # so no explicit rewind is needed.
    return buffer.getvalue()
11
 
12
def save_pil_to_cache(pil_image: Image.Image, format: str) -> bytes:
    """Encode *pil_image* for caching; thin wrapper over encode_pil_to_bytes."""
    encoded = encode_pil_to_bytes(pil_image, format)
    return encoded
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
16
  person_images = []
 
18
  x1, y1, x2, y2 = bbox
19
  person_image = image[y1:y2, x1:x2] # Crop the detected person
20
  person_pil_image = Image.fromarray(person_image).convert('RGB') # Convert to RGB
21
+ person_images.append(person_pil_image)
 
 
 
 
22
  return person_images
23
 
 
 
 
 
 
24
  def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
25
  if image is None:
26
  return None, []
 
29
  bboxes, vbboxes = detect_person(image, detector)
30
  res = visualize(image, bboxes, vbboxes)
31
  person_images = extract_persons(res, bboxes)
32
+ processed_image = Image.fromarray(res[:, :, ::-1], 'RGB') # BGR -> RGB
33
+ return processed_image, person_images
34
+
35
def process_image(image: Image.Image) -> tuple[Image.Image, list[Image.Image]]:
    """Adapt a PIL input to the numpy-based `detect` pipeline.

    Returns (annotated image, list of cropped persons), or (None, []) when
    detection fails — failures are reported but never propagated to the UI.
    """
    try:
        result = detect(np.array(image))
    except Exception as e:  # UI boundary: degrade gracefully instead of crashing
        print(f"An error occurred: {e}")
        return None, []
    return result
43
+
44
def build_gradio_interface():
    """Assemble the Gradio Blocks UI: uploader, annotated output, and person gallery."""
    with gr.Blocks() as demo:
        gr.Markdown("## Person Detection App")

        with gr.Row():
            uploader = gr.Image(type="pil", label="Upload an Image")
            annotated = gr.Image(type="pil", label="Processed Image")
            persons_gallery = gr.Gallery(label="Detected Persons")

        # Re-run detection whenever a new image is supplied.
        uploader.change(fn=process_image, inputs=uploader, outputs=[annotated, persons_gallery])

    return demo
 
 
 
 
 
 
56
 
57
# Example usage
if __name__ == "__main__":
    # Build the UI and serve it with Gradio's default settings.
    build_gradio_interface().launch()