Spaces · Running on T4
AAAAAAyq committed fc4ad97 (parent: 0eb155c)

Update the contour code

Files changed: app.py (+9 -6), app_debug.py (+11 -8)
app.py
@@ -18,7 +18,6 @@ def fast_process(annotations, image, high_quality, device):
 
     original_h = image.height
     original_w = image.width
-    image = image.convert('RGBA')
     # fig = plt.figure(figsize=(10, 10))
     # plt.imshow(image)
     if high_quality == True:
@@ -48,7 +47,7 @@ def fast_process(annotations, image, high_quality, device):
     if isinstance(annotations, torch.Tensor):
         annotations = annotations.cpu().numpy()
 
-    if high_quality
+    if high_quality:
         contour_all = []
         temp = np.zeros((original_h, original_w,1))
         for i, mask in enumerate(annotations):
@@ -58,14 +57,18 @@ def fast_process(annotations, image, high_quality, device):
             contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
             for contour in contours:
                 contour_all.append(contour)
-        cv2.drawContours(temp, contour_all, -1, (255, 255, 255),
-        color = np.array([0 / 255, 0 / 255, 255 / 255, 0.
+        cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 3)
+        color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
         contour_mask = temp / 255 * color.reshape(1, 1, -1)
-        overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
-        image.paste(overlay_contour, (0, 0), overlay_contour)
         # plt.imshow(contour_mask)
+    image = image.convert('RGBA')
+
     overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), 'RGBA')
     image.paste(overlay_inner, (0, 0), overlay_inner)
+
+    if high_quality:
+        overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
+        image.paste(overlay_contour, (0, 0), overlay_contour)
 
     return image
     # plt.axis('off')
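For context, here is a minimal, self-contained sketch of the pipeline fast_process implements after this commit: draw every mask's contours into one single-channel canvas, tint the canvas blue at 0.9 alpha, convert the base image to RGBA only at compositing time, and paste the overlay with itself as the alpha mask. The wrapper name draw_contour_overlay and the smoke-test mask are made up for illustration; the constants and the cv2/PIL calls come from the added lines above.

import numpy as np
import cv2
from PIL import Image

def draw_contour_overlay(image, masks, thickness=3, alpha=0.9):
    # Hypothetical wrapper around the post-commit fast_process logic.
    original_h, original_w = image.height, image.width
    temp = np.zeros((original_h, original_w, 1), dtype=np.uint8)

    # Collect contours from every binary mask into one list, then draw
    # them all at once (contourIdx=-1) with the commit's thickness of 3.
    contour_all = []
    for mask in masks:
        annotation = mask.astype(np.uint8)
        contours, _ = cv2.findContours(annotation, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        contour_all.extend(contours)
    cv2.drawContours(temp, contour_all, -1, (255, 255, 255), thickness)

    # Tint the white canvas blue with the commit's 0.9 alpha; pixels off
    # the contours keep alpha 0 and stay transparent.
    color = np.array([0 / 255, 0 / 255, 255 / 255, alpha])
    contour_mask = temp / 255 * color.reshape(1, 1, -1)

    # Convert to RGBA only now, as the commit does, and paste the overlay
    # with itself as the mask so Pillow blends by its alpha channel.
    image = image.convert('RGBA')
    overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
    image.paste(overlay_contour, (0, 0), overlay_contour)
    return image

# Made-up smoke test: a single square mask on a gray background.
if __name__ == '__main__':
    base = Image.new('RGB', (128, 128), (64, 64, 64))
    square = np.zeros((128, 128), dtype=np.uint8)
    square[32:96, 32:96] = 1
    draw_contour_overlay(base, [square]).save('contour_demo.png')

Pasting the overlay with itself as the mask means Pillow writes only where the overlay's alpha is non-zero, which is why the commit moves the contour paste after the inner-mask paste: applied first, the outlines would be painted over by the filled regions.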
app_debug.py
@@ -18,7 +18,6 @@ def fast_process(annotations, image, high_quality, device):
@@ -48,7 +47,7 @@ def fast_process(annotations, image, high_quality, device):
@@ -58,14 +57,18 @@ def fast_process(annotations, image, high_quality, device):
(These three hunks are identical to the app.py changes above.)

@@ -176,10 +179,10 @@ def predict(input, input_size=512, high_visual_quality=False):
 app_interface = gr.Interface(fn=predict,
                              inputs=[gr.Image(type='pil'),
                                      gr.components.Slider(minimum=512, maximum=1024, value=1024, step=64, label='input_size'),
-                                     gr.components.Checkbox(value=
+                                     gr.components.Checkbox(value=True, label='high_visual_quality')],
                              # outputs=['plot'],
                              outputs=gr.Image(type='pil'),
-                             examples=[["assets/sa_8776.jpg"
+                             examples=[["assets/sa_8776.jpg"]],
                              # # ["assets/sa_1309.jpg", 1024]],
                              # examples=[["assets/sa_192.jpg"], ["assets/sa_414.jpg"],
                              #           ["assets/sa_561.jpg"], ["assets/sa_862.jpg"],
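For completeness, a runnable sketch of the post-commit interface wiring, with a stand-in predict so it works without the model. The example row is padded with the slider and checkbox defaults, since gr.Interface generally expects one value per input component; the diff itself lists only the image path, which refers to the Space's assets and must exist locally.

import gradio as gr

def predict(input, input_size=512, high_visual_quality=True):
    # Stand-in: the real predict in app_debug.py runs the model and
    # calls fast_process; here we just echo the uploaded image.
    return input

app_interface = gr.Interface(fn=predict,
                             inputs=[gr.Image(type='pil'),
                                     gr.components.Slider(minimum=512, maximum=1024, value=1024,
                                                          step=64, label='input_size'),
                                     gr.components.Checkbox(value=True, label='high_visual_quality')],
                             outputs=gr.Image(type='pil'),
                             # One value per input component; replace the path
                             # with any local image when trying this out.
                             examples=[["assets/sa_8776.jpg", 1024, True]])

if __name__ == '__main__':
    app_interface.launch()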