dd
app.py
ADDED
@@ -0,0 +1,58 @@
import gradio as gr
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation


def greet(url):
    # Load the Cityscapes semantic-segmentation checkpoint.
    # (Loading inside the function means the weights are re-loaded on every call;
    # moving these two lines to module level would avoid that.)
    processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
    model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")

    # Fetch the image from the given URL and preprocess it.
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
    # model predicts class_queries_logits of shape `(batch_size, num_queries)`
    # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
    class_queries_logits = outputs.class_queries_logits
    masks_queries_logits = outputs.masks_queries_logits

    # you can pass them to processor for postprocessing
    predicted_semantic_map = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]

    color_map = {
        0: (0, 0, 0),        # class 0: black
        1: (255, 255, 255),  # class 1: white
        2: (255, 0, 0),
        3: (0, 255, 0),
        4: (0, 0, 255),
        5: (255, 255, 0),
        6: (255, 0, 255),
        7: (0, 255, 255),
        # assign colors for other classes here
    }

    # Paint each pixel with the color of its predicted class.
    semantic_image = Image.new("RGB", (predicted_semantic_map.shape[1], predicted_semantic_map.shape[0]))
    pixels = semantic_image.load()
    for y in range(predicted_semantic_map.shape[0]):
        for x in range(predicted_semantic_map.shape[1]):
            class_id = predicted_semantic_map[y, x].item()
            color = color_map.get(class_id, (0, 0, 0))  # unlisted classes default to black
            pixels[x, y] = color

    # Return the PIL image (not the pixel-access object) so Gradio can display it.
    return semantic_image


url = "https://www.cityscapes-dataset.com/wordpress/wp-content/uploads/2015/07/muenster00.png"
greet(url)  # warm-up call on the sample image

iface = gr.Interface(
    fn=greet,
    inputs=gr.Textbox(value=url, label="Image URL"),  # greet() expects a URL string, so take text input
    outputs="image",
)

iface.launch(share=True)
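The nested per-pixel loop in greet() is the slowest part of the app once the model weights are cached. A minimal sketch of a vectorized alternative, assuming predicted_semantic_map is the 2-D tensor of class ids returned by post_process_semantic_segmentation and color_map is the dict defined in app.py (the helper name colorize is made up for illustration):

import numpy as np
from PIL import Image

def colorize(predicted_semantic_map, color_map):
    ids = predicted_semantic_map.cpu().numpy()    # (H, W) array of integer class ids
    palette = np.zeros((256, 3), dtype=np.uint8)  # every class starts out black, like color_map.get(..., (0, 0, 0))
    for class_id, rgb in color_map.items():
        palette[class_id] = rgb
    return Image.fromarray(palette[ids])          # fancy indexing maps each class id to its RGB triple

Replacing the loop in greet() with `return colorize(predicted_semantic_map, color_map)` should produce the same image much faster, since the lookup happens in NumPy rather than in a Python loop.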