cheng committed
Commit 9196eaa · 1 parent: f558a24
modify for PIL
app.py CHANGED
@@ -37,26 +37,27 @@ ckpt_filenmae = "groundingdino_swint_ogc.pth"
 
 
 def detection(image):
-    sub_images =
-
+    sub_images = process_panorama(image)
+    predict_images = []
+    for sub_image in sub_images:
+        predict_images.append(run_grounding(sub_image))
 
-    return
+    return predict_images
 
 
-def
+def process_panorama(image):
     equ = E2P.Equirectangular(image)
-    FOV = picture_fov
     y_axis = 0
 
     sub_images = []
     while y_axis <= 0:
         z_axis = -150
         while z_axis <= 90:
-            img = equ.GetPerspective(
+            img = equ.GetPerspective(picture_fov, z_axis, y_axis, picture_height, picture_width)
             # cv2.imwrite(f'{directory_name}_{z_axis}z.jpg', img)
             sub_images.append(img)
-            z_axis +=
-        y_axis +=
+            z_axis += picture_fov
+        y_axis += picture_fov
     return sub_images
 
 
@@ -94,9 +95,14 @@ def image_transform_grounding_for_vis(init_image):
 model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae)
 
 
-def run_grounding(input_image
-
+def run_grounding(input_image):
+    cv2_img_rgb = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
+    pil_img = Image.fromarray(cv2_img_rgb)
+    init_image = pil_img.convert("RGB")
     original_size = init_image.size
+    grounding_caption = "traffic sign"
+    box_threshold = 0.25
+    text_threshold = 0.25
 
     _, image_tensor = image_transform_grounding(init_image)
     image_pil: Image = image_transform_grounding_for_vis(init_image)
@@ -123,7 +129,7 @@ if __name__ == "__main__":
 
         with gr.Column():
            gallery = gr.Gallery(label="Detection Results").style(
-                columns=[3], preview=
+                columns=[3], preview=True, object_fit="none")
 
         run_button.click(fn=detection, inputs=[
                         input_image], outputs=[gallery])
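As a note on the "modify for PIL" change: run_grounding now receives raw OpenCV crops from process_panorama, so it converts each one to a PIL image before the grounding transforms. The sketch below is a minimal, standalone illustration of that conversion, not code from app.py; the array name, shape, and zero-filled contents are placeholders, and it only uses the cv2/PIL calls that appear in the diff (cvtColor, Image.fromarray, convert). The channel swap matters because OpenCV stores images as BGR uint8 arrays, while the PIL-based pipeline downstream assumes RGB.

import cv2
import numpy as np
from PIL import Image

# Placeholder for one perspective crop from process_panorama (OpenCV images are BGR, uint8).
bgr_crop = np.zeros((720, 1080, 3), dtype=np.uint8)

# Reorder channels to RGB, then wrap the array as a PIL image, as run_grounding does above.
rgb_crop = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2RGB)
init_image = Image.fromarray(rgb_crop).convert("RGB")

print(init_image.size)  # PIL reports (width, height): (1080, 720) for this placeholder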