Commit: finish
app.py
CHANGED
```diff
@@ -109,37 +109,26 @@ def sepia(input_img):
 
 def segment_and_caption(input_img):
     input_img = Image.fromarray(input_img)
-
-    # Run segmentation
     inputs = feature_extractor(images=input_img, return_tensors="tf")
     outputs = seg_model(**inputs)
     logits = outputs.logits
-
     logits = tf.transpose(logits, [0, 2, 3, 1])
     logits = tf.image.resize(
         logits, input_img.size[::-1]
     )
     seg = tf.math.argmax(logits, axis=-1)[0]
-
-    # Convert the segmentation result to text
     seg_text = ""
     for label, label_name in enumerate(labels_list):
         count = np.sum(seg.numpy() == label)
         seg_text += f"{label_name}: {count} pixels\n"
-
-    # Generate an image caption
     caption = caption_model.generate(input_img, max_length=20, num_return_sequences=1, return_dict_in_generate=True)
     caption_text = caption[0]['text']
-
-    # Return the segmentation result and the caption
     return input_img, seg_text, caption_text
 
 
-
-
 demo = gr.Interface(fn=segment_and_caption,
                     inputs=gr.Image(shape=(1024, 1024)),
-                    outputs=["image","text", "text"],
+                    outputs=["image", "text", "text"],
                     examples=["city-1.jpg", "city-2.jpg", "city-3.jpg", "city-4.jpg", "city-5.jpg"],
                     allow_flagging='never')
 
```
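For reference, the post-processing kept by this hunk follows the standard TF SegFormer pattern: the model emits channels-first logits at a fraction of the input resolution, so they are transposed to channels-last, resized back to the image size (PIL's `size` is `(W, H)`, hence the `[::-1]`), and reduced with a per-pixel argmax. Below is a minimal self-contained sketch of the same steps, assuming the `nvidia/segformer-b0-finetuned-cityscapes-1024-1024` checkpoint; the Space's actual `seg_model`, `feature_extractor`, and `labels_list` are defined earlier in app.py and are not shown in this hunk.

```python
import numpy as np
import tensorflow as tf
from PIL import Image
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

# Assumed checkpoint; the Space may load a different one earlier in app.py.
ckpt = "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
feature_extractor = SegformerFeatureExtractor.from_pretrained(ckpt)
seg_model = TFSegformerForSemanticSegmentation.from_pretrained(ckpt)

img = Image.open("city-1.jpg")
inputs = feature_extractor(images=img, return_tensors="tf")
logits = seg_model(**inputs).logits               # (1, num_labels, h, w), channels-first
logits = tf.transpose(logits, [0, 2, 3, 1])       # -> channels-last for tf.image.resize
logits = tf.image.resize(logits, img.size[::-1])  # PIL size is (W, H); resize wants (H, W)
seg = tf.math.argmax(logits, axis=-1)[0]          # (H, W) label map

# Per-label pixel counts, mirroring the loop in the diff.
for label_id, name in seg_model.config.id2label.items():
    print(f"{name}: {np.sum(seg.numpy() == int(label_id))} pixels")
```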
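The captioning lines are a plausible source of the Space's "Runtime error" status: `generate` on a transformers model expects tensor inputs such as `pixel_values` rather than a PIL image, and with `return_dict_in_generate=True` it returns an output object holding token ids, not a list of dicts with a `'text'` key, so `caption[0]['text']` would fail even if generation ran. A hedged sketch of a working caption path via the `image-to-text` pipeline follows; `nlpconnect/vit-gpt2-image-captioning` here is an illustrative choice, not necessarily the Space's `caption_model`.

```python
from PIL import Image
from transformers import pipeline

# Illustrative checkpoint; the Space's caption_model may differ.
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")

img = Image.open("city-1.jpg")
result = captioner(img, generate_kwargs={"max_length": 20, "num_return_sequences": 1})
caption_text = result[0]["generated_text"]  # pipeline returns [{"generated_text": ...}]
print(caption_text)
```

Separately, `gr.Image(shape=(1024, 1024))` only exists in Gradio 3.x; if the Space runs Gradio 4 or later, the `shape` argument was removed and would raise a `TypeError` at startup.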