import gradio as gr
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation


def greet(url):
    # Load the Mask2Former processor and model (cached locally after the first download).
    processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
    model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")

    # Fetch the image from the URL and prepare the model inputs.
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)

    # Raw per-query logits (not needed for the semantic map below).
    class_queries_logits = outputs.class_queries_logits
    masks_queries_logits = outputs.masks_queries_logits

    # Post-process into a (height, width) map of class ids at the original image resolution.
    # Note: PIL's image.size is (width, height), so it is reversed for target_sizes.
    predicted_semantic_map = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]

    # Map a few Cityscapes class ids to display colors; any other id falls back to black.
    color_map = {
        0: (0, 0, 0),
        1: (255, 255, 255),
        2: (255, 0, 0),
        3: (0, 255, 0),
        4: (0, 0, 255),
        5: (255, 255, 0),
        6: (255, 0, 255),
        7: (0, 255, 255),
    }

    # Paint each pixel with the color of its predicted class.
    semantic_image = Image.new('RGB', (predicted_semantic_map.shape[1], predicted_semantic_map.shape[0]))
    pixels = semantic_image.load()
    for y in range(predicted_semantic_map.shape[0]):
        for x in range(predicted_semantic_map.shape[1]):
            class_id = predicted_semantic_map[y, x].item()
            color = color_map.get(class_id, (0, 0, 0))
            pixels[x, y] = color

    # Return the PIL image itself rather than the pixel-access object so Gradio can render it.
    return semantic_image
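

# A vectorized alternative to the per-pixel loop above, sketched here as an optional helper
# rather than part of the original demo. It assumes numpy is installed and builds a color
# lookup table indexed by class id (unknown ids stay black).
def colorize_semantic_map(predicted_semantic_map, color_map):
    import numpy as np

    seg = predicted_semantic_map.cpu().numpy().astype(np.int64)
    lut = np.zeros((int(seg.max()) + 1, 3), dtype=np.uint8)
    for class_id, color in color_map.items():
        if class_id < lut.shape[0]:
            lut[class_id] = color
    # Index the lookup table with the class-id map to get an (H, W, 3) color image.
    return Image.fromarray(lut[seg])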


url = "http://images.cocodataset.org/val2017/000000039769.jpg"

# One warm-up call so the checkpoint is downloaded and cached before the interface starts (the result is discarded).
greet(url)

# greet() expects an image URL, so the interface takes a text box rather than an uploaded image.
iface = gr.Interface(
    fn=greet,
    inputs=gr.Textbox(value=url, label="Image URL"),
    outputs="image",
)
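
# Optional refinement, not in the original demo: greet() reloads the processor and model on
# every request. A cached loader (sketch below, assuming the same checkpoint name) would let
# greet() call load_segmenter() instead of from_pretrained() each time.
from functools import lru_cache

@lru_cache(maxsize=1)
def load_segmenter(checkpoint="facebook/mask2former-swin-large-cityscapes-semantic"):
    processor = AutoImageProcessor.from_pretrained(checkpoint)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint)
    return processor, model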


iface.launch()