Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -47,4 +47,51 @@ def visualize(image: np.ndarray, bboxes: np.ndarray, vbboxes: np.ndarray) -> np.
|
|
47 |
vbbox = vbboxes[i]
|
48 |
x1, y1, x2, y2 = bbox
|
49 |
vx1, vy1, vx2, vy2 = vbbox
|
50 |
-
cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
vbbox = vbboxes[i]
|
48 |
x1, y1, x2, y2 = bbox
|
49 |
vx1, vy1, vx2, vy2 = vbbox
|
50 |
+
cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255, 0), 1)
|
51 |
+
alpha = 0.8
|
52 |
+
color = (255, 0, 0)
|
53 |
+
for c in range(3):
|
54 |
+
res[vy1:vy2, vx1:vx2, c] = res[vy1:vy2, vx1:vx2, c] * alpha + color[c] * (1.0 - alpha)
|
55 |
+
cv2.circle(res, (vx1, vy1), 1, color, 2)
|
56 |
+
cv2.circle(res, (vx1, vy2), 1, color, 2)
|
57 |
+
cv2.circle(res, (vx2, vy1), 1, color, 2)
|
58 |
+
cv2.circle(res, (vx2, vy2), 1, color, 2)
|
59 |
+
return res
|
60 |
+
|
61 |
+
|
62 |
+
def extract_persons(image: np.ndarray, bboxes: np.ndarray) -> list[Image.Image]:
    """Crop each detected person out of *image*.

    Args:
        image: HxWx3 uint8 image array (assumed RGB — confirm with caller).
        bboxes: array of (x1, y1, x2, y2) pixel boxes, one row per person.

    Returns:
        One PIL image per bounding box, in input order.
    """
    person_images = []
    height, width = image.shape[:2]
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        # Clamp to the image bounds: detectors can emit slightly negative
        # coordinates near borders, and a negative slice index would wrap
        # around (Python slicing semantics), producing a garbage crop.
        x1 = max(0, int(x1))
        y1 = max(0, int(y1))
        x2 = min(width, int(x2))
        y2 = min(height, int(y2))
        person_image = image[y1:y2, x1:x2]  # may be empty for a degenerate box
        person_pil_image = Image.fromarray(person_image)
        person_images.append(person_pil_image)
    return person_images
# Instantiate the detector once at module import so every request reuses it.
detector = load_model()
# NOTE(review): -1 presumably selects the CPU context (insightface-style API)
# — confirm against the detector's prepare() documentation.
# NMS IoU threshold 0.5; the model expects 640x640 input.
detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
def detect(image: np.ndarray) -> tuple[Image.Image, list[Image.Image]]:
    """Run person detection on an RGB image and build the UI outputs.

    Args:
        image: HxWx3 uint8 RGB array from the Gradio input component.

    Returns:
        Tuple of (annotated full image as an RGB PIL image,
        list of per-person PIL crops for the gallery).
    """
    image = image[:, :, ::-1]  # RGB -> BGR: the detector expects OpenCV channel order
    bboxes, vbboxes = detect_person(image, detector)
    res = visualize(image, bboxes, vbboxes)
    # Bug fix: crop from the RGB view of the annotated image, not from the
    # BGR `res` directly — otherwise the gallery thumbnails are color-swapped.
    person_images = extract_persons(res[:, :, ::-1], bboxes)
    return Image.fromarray(res[:, :, ::-1], 'RGB'), person_images  # BGR -> RGB
# Example images bundled with the Space; sorted for a stable, reproducible order.
examples = sorted(pathlib.Path("images").glob("*.jpg"))

# Gradio UI: one input image -> (annotated image, gallery of person crops).
demo = gr.Interface(
    fn=detect,
    inputs=gr.Image(label="Input", type="numpy"),
    outputs=[gr.Image(label="Processed Image", type="numpy"), gr.Gallery(label="Detected Persons", type="numpy")],
    examples=examples,
    examples_per_page=30,
    title=TITLE,
    description=DESCRIPTION,
)

if __name__ == "__main__":
    # Bounded queue keeps concurrent requests from overloading the detector.
    demo.queue(max_size=10).launch()