Karin0616 committed
Commit c91d842 · 1 Parent(s): 1f6afca
annotated test
app.py
CHANGED
@@ -1,7 +1,4 @@
 import gradio as gr
-import random
-
-from matplotlib import gridspec
 import matplotlib.pyplot as plt
 import numpy as np
 from PIL import Image
@@ -16,35 +13,33 @@ model = TFSegformerForSemanticSegmentation.from_pretrained(
 )
 
 def ade_palette():
-
     return [
-        [204, 87, 92],
+        [204, 87, 92],    # road (Reddish)
         [112, 185, 212],  # sidewalk (Blue)
         [196, 160, 122],  # building (Brown)
         [106, 135, 242],  # wall (Light Blue)
-        [91, 192, 222],
+        [91, 192, 222],   # fence (Turquoise)
         [255, 192, 203],  # pole (Pink)
         [176, 224, 230],  # traffic light (Light Blue)
-        [222, 49, 99],
-        [139, 69, 19],
-        [255, 0, 0],
-        [0, 0, 255],
+        [222, 49, 99],    # traffic sign (Red)
+        [139, 69, 19],    # vegetation (Brown)
+        [255, 0, 0],      # terrain (Red)
+        [0, 0, 255],      # sky (Blue)
         [255, 228, 181],  # person (Peach)
-        [128, 0, 0],
-        [0, 128, 0],
-        [255, 99, 71],
-        [0, 255, 0],
-        [128, 0, 128],
-        [255, 255, 0],
-        [128, 0, 128]
-
+        [128, 0, 0],      # rider (Maroon)
+        [0, 128, 0],      # car (Green)
+        [255, 99, 71],    # truck (Tomato)
+        [0, 255, 0],      # bus (Lime)
+        [128, 0, 128],    # train (Purple)
+        [255, 255, 0],    # motorcycle (Yellow)
+        [128, 0, 128]     # bicycle (Purple)
     ]
 
-labels_list = [
-
-
-
-
+labels_list = [
+    "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
+    "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
+    "truck", "bus", "train", "motorcycle", "bicycle"
+]
 
 colormap = np.asarray(ade_palette())
 
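The commented palette and the inlined labels_list keep the 19 Cityscapes classes and their colors in one place, index-aligned so that `colormap[label]` is a direct per-pixel lookup. A minimal sketch of that fancy-indexing step (the mechanism `label_to_color_image` relies on), with a hypothetical 3-class palette standing in for the full 19:

```python
import numpy as np

# Hypothetical 3-class palette; app.py's ade_palette() returns 19 such rows.
colormap = np.asarray([
    [204, 87, 92],    # road
    [112, 185, 212],  # sidewalk
    [196, 160, 122],  # building
])

# A 2x2 segmentation map: one integer class id per pixel.
label = np.array([[0, 1],
                  [2, 1]])

# Fancy indexing maps every pixel's class id to its RGB row in one shot.
color_image = colormap[label]
print(color_image.shape)  # (2, 2, 3)
```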
@@ -56,28 +51,7 @@ def label_to_color_image(label):
         raise ValueError("label value too large.")
     return colormap[label]
 
-def draw_plot(pred_img, seg):
-    fig = plt.figure(figsize=(20, 15))
-
-    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
-
-    plt.subplot(grid_spec[0])
-    plt.imshow(pred_img)
-    plt.axis('off')
-    LABEL_NAMES = np.asarray(labels_list)
-    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
-    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
-
-    unique_labels = np.unique(seg.numpy().astype("uint8"))
-    ax = plt.subplot(grid_spec[1])
-    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
-    ax.yaxis.tick_right()
-    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
-    plt.xticks([], [])
-    ax.tick_params(width=0.0, labelsize=25)
-    return fig
-
-def sepia(input_img):
+def apply_sepia(input_img, selected_labels):
     input_img = Image.fromarray(input_img)
 
     inputs = feature_extractor(images=input_img, return_tensors="tf")
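The deleted draw_plot paired the blended image with a legend strip listing only the classes actually present in the prediction; the new code returns a bare figure instead. The legend technique is compact enough to be worth recording: take the unique ids in the segmentation map and index the colormap with them. A condensed, self-contained sketch with a trimmed 3-class stand-in for `colormap` and `labels_list`:

```python
import numpy as np

# Trimmed stand-ins for app.py's 19-entry colormap and labels_list.
colormap = np.asarray([[204, 87, 92], [112, 185, 212], [196, 160, 122]])
labels_list = ["road", "sidewalk", "building"]

# Hypothetical predicted label map (what seg.numpy() yields in app.py).
seg_ids = np.array([[0, 0, 2],
                    [1, 1, 2]], dtype=np.uint8)

unique_labels = np.unique(seg_ids)         # class ids present in the image
full_label_map = np.arange(len(labels_list)).reshape(-1, 1)
full_color_map = colormap[full_label_map]  # (3, 1, 3): a column of swatches

legend_swatches = full_color_map[unique_labels]        # one swatch per class
legend_names = np.asarray(labels_list)[unique_labels]  # matching names
print(legend_names)  # ['road' 'sidewalk' 'building']
```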
@@ -93,123 +67,45 @@ def sepia(input_img):
     color_seg = np.zeros(
         (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
     )  # height, width, 3
+
     for label, color in enumerate(colormap):
-        color_seg[seg.numpy() == label, :] = color
+        if labels_list[label] in selected_labels:
+            color_seg[seg.numpy() == label, :] = color
 
     # Show image + mask
     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
     pred_img = pred_img.astype(np.uint8)
 
-    fig = draw_plot(pred_img, seg)
+    fig, ax = plt.subplots()
+    ax.imshow(pred_img)
+    ax.axis('off')
     return fig
 
-
-
-
-
-
-
-
-
-
-        "traffic light",
-        "traffic sign",
-        "vegetation",
-        "terrain",
-        "sky",
-        "person",
-        "rider",
-        "car",
-        "truck",
-        "bus",
-        "train",
-        "motorcycle",
-        "bicycle"
-    ]
-
-    with gr.Row():
-        num_boxes = gr.Slider(1, 1, 1, step=0, label="Number of boxes")
-        num_segments = gr.Slider(0, 19, 1, step=1, label="Number of segments")
-
-    with gr.Row():
-        img_input = gr.Image()
-        img_output = gr.AnnotatedImage(
-            color_map={
-                "road": "#CC575C",
-                "sidewalk": "#70B9D4",
-                "building": "#C4A07A",
-                "wall": "#6A87F2",
-                "fence": "#5BC0DE",
-                "pole": "#FFC0CB",
-                "traffic light": "#B0E0E6",
-                "traffic sign": "#DE3163",
-                "vegetation": "#8B4513",
-                "terrain": "#FF0000",
-                "sky": "#0000FF",
-                "person": "#FFE4B5",
-                "rider": "#800000",
-                "car": "#008000",
-                "truck": "#FF6347",
-                "bus": "#00FF00",
-                "train": "#800080",
-                "motorcycle": "#FFFF00",
-                "bicycle": "#800080"}
+# Gradio Interface
+iface = gr.Interface(
+    fn=apply_sepia,
+    inputs=[
+        gr.Image(shape=(564, 846)),
+        gr.CheckboxGroup(
+            choices=[(label, label) for label in labels_list],
+            default=[labels_list[0]],
+            label="Select Labels"
         )
+    ],
+    outputs=gr.AnnotatedImage(type="plot", loc="center", scale=True),
+    live=True,
+    examples=["city1.jpg", "city2.jpg", "city3.jpg"],
+    allow_flagging='never',
+    title="Semantic Segmentation with Color Highlighting",
+    description="Apply sepia effect to the image with color highlighting for selected labels.",
+    theme="darkpeach",
+    css="""
+    body {
+        background-color: dark;
+        color: white;
+        font-family: Arial, sans-serif;
+    }
+    """
+)
 
-
-        selected_section = gr.Textbox(label="Selected Section")
-
-
-    def section(img, num_boxes, num_segments):
-        sections = []
-
-        for a in range(num_boxes):
-            x = random.randint(0, img.shape[1])
-            y = random.randint(0, img.shape[0])
-            w = random.randint(0, img.shape[1] - x)
-            h = random.randint(0, img.shape[0] - y)
-            sections.append(((x, y, x + w, y + h), section_labels[a]))
-        for b in range(num_segments):
-            x = random.randint(0, img.shape[1])
-            y = random.randint(0, img.shape[0])
-            r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
-            mask = np.zeros(img.shape[:2])
-            for i in range(img.shape[0]):
-                for j in range(img.shape[1]):
-                    dist_square = (i - y) ** 2 + (j - x) ** 2
-                    if dist_square < r ** 2:
-                        mask[i, j] = round((r ** 2 - dist_square) / r ** 2 * 4) / 4
-            sections.append((mask, section_labels[b + num_boxes]))
-        return (img, sections)
-
-
-    section_btn.click(section, [img_input, num_boxes, num_segments], img_output)
-
-
-    def select_section(evt: gr.SelectData):
-        return section_labels[evt.index]
-
-
-    img_output.select(select_section, None, selected_section)
-
-demo = gr.Interface(fn=sepia,
-                    inputs=gr.Image(shape=(564,846)),
-                    outputs=['plot'],
-                    live=True,
-                    examples=["city1.jpg","city2.jpg","city3.jpg"],
-                    allow_flagging='never',
-                    title="This is a machine learning activity project at Kyunggi University.",
-                    theme="darkpeach",
-                    css="""
-                    body {
-                        background-color: dark;
-                        color: white; /* adjust font color */
-                        font-family: Arial, sans-serif; /* adjust font family */
-                    }
-                    """
-
-)
-
-
-demo.launch()
-
+iface.launch()
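The heart of the commit is the filtered colorization in apply_sepia: a class is painted into `color_seg` only if its name is among the checked labels, and the painted mask is then blended 50/50 with the input image. A self-contained sketch of that loop and blend, using hypothetical 2-class data:

```python
import numpy as np

# Trimmed stand-ins for app.py's colormap and labels_list.
labels_list = ["road", "sidewalk"]
colormap = np.asarray([[204, 87, 92], [112, 185, 212]])

img = np.full((2, 3, 3), 200, dtype=np.uint8)  # hypothetical input image
seg = np.array([[0, 0, 1],
                [0, 1, 1]])                    # predicted class id per pixel
selected_labels = ["road"]                     # the user's checkbox selection

color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
for label, color in enumerate(colormap):
    if labels_list[label] in selected_labels:  # skip unchecked classes
        color_seg[seg == label, :] = color     # paint matching pixels

# 50/50 blend, exactly as in the diff; unchecked classes blend with black.
pred_img = (img * 0.5 + color_seg * 0.5).astype(np.uint8)
print(pred_img[0, 0], pred_img[0, 2])  # painted road pixel vs. dimmed sidewalk
```

One consequence of this scheme: pixels of unchecked classes blend with the zeros left in `color_seg`, so they come out at half brightness rather than untouched; blending only where `color_seg` is nonzero would leave them at full brightness.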
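Two Gradio details in the new interface look fragile and are worth flagging. apply_sepia returns a matplotlib figure, but `gr.AnnotatedImage` renders an (image, annotations) pair and has no `type="plot"` or `loc` parameters; `gr.Plot` is the component that accepts a figure. Likewise, `gr.CheckboxGroup` in Gradio 3.x takes its preselection as `value=` rather than `default=`, and once the interface has two inputs, each `examples` row has to supply both values. A hedged sketch of the wiring under those assumptions, reusing `apply_sepia` and `labels_list` from the file above (`theme="darkpeach"` is a Gradio 2-era theme name and is dropped here):

```python
import gradio as gr

# Sketch only: assumes apply_sepia and labels_list from app.py above,
# and Gradio 3.x (where gr.Image still accepts shape=).
iface = gr.Interface(
    fn=apply_sepia,
    inputs=[
        gr.Image(shape=(564, 846)),
        gr.CheckboxGroup(
            choices=labels_list,         # plain strings in Gradio 3.x
            value=[labels_list[0]],      # 3.x spelling of "default"
            label="Select Labels",
        ),
    ],
    outputs=gr.Plot(),  # renders the matplotlib figure apply_sepia returns
    live=True,
    # With two inputs, every example row needs an image and a selection.
    examples=[["city1.jpg", ["road"]],
              ["city2.jpg", ["road"]],
              ["city3.jpg", ["road"]]],
    allow_flagging="never",
    title="Semantic Segmentation with Color Highlighting",
    description="Apply sepia effect to the image with color highlighting for selected labels.",
)

iface.launch()
```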