File size: 1,475 Bytes
88e5715
 
 
 
 
8a40f69
88e5715
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8a40f69
 
 
 
 
 
 
 
 
 
 
88e5715
 
083d7ab
88e5715
 
 
 
 
 
 
 
8a40f69
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import gradio as gr
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
import numpy as np


def greet(url):
    """Run Mask2Former semantic segmentation on an image and return a
    sepia-toned visualisation of the predicted class map.

    Parameters
    ----------
    url : str | PIL.Image.Image | numpy.ndarray
        Either an image URL to download, or an already-loaded image
        (a ``gr.Image`` input component passes a numpy array, not a URL).

    Returns
    -------
    numpy.ndarray
        Float array of shape (H, W, 3) in [0, 1]: the segmentation map
        replicated to three channels and passed through a sepia matrix.
    """
    # Load the (large) checkpoint once and cache it on the function object,
    # so repeated Gradio calls do not re-instantiate the model each time.
    if not hasattr(greet, "_model"):
        checkpoint = "facebook/mask2former-swin-large-cityscapes-semantic"
        greet._processor = AutoImageProcessor.from_pretrained(checkpoint)
        greet._model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint)
    processor, model = greet._processor, greet._model

    # Accept a URL string (direct calls) or an in-memory image (Gradio UI).
    if isinstance(url, str):
        image = Image.open(requests.get(url, stream=True).raw)
    elif isinstance(url, Image.Image):
        image = url
    else:
        image = Image.fromarray(np.asarray(url))

    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # post_process_semantic_segmentation expects target sizes as
    # (height, width); PIL's Image.size is (width, height), hence [::-1].
    # BUG FIX: the original passed `url.size` — a str has no .size attribute.
    predicted_semantic_map = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]

    # The map is an (H, W) tensor of integer class ids; replicate it to
    # three channels so the 3x3 sepia matrix can be applied per pixel.
    # BUG FIX: the original dotted the 2-D (H, W) tensor directly with the
    # (3, 3) matrix, which fails unless W happens to equal 3.
    seg = predicted_semantic_map.cpu().numpy().astype(np.float64)

    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = np.stack([seg, seg, seg], axis=-1).dot(sepia_filter.T)

    # Guard against 0/0 when every pixel is predicted as class 0.
    peak = sepia_img.max()
    if peak > 0:
        sepia_img /= peak

    return sepia_img


# Demo image used as the default value for the Gradio input component.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"

if __name__ == "__main__":
    # Warm-up call: downloads the checkpoint and exercises the pipeline
    # end-to-end before the UI starts serving requests.
    greet(url)

    iface = gr.Interface(
        fn=greet,
        inputs=gr.Image(value=url),
        outputs="image",
    )

    iface.launch(debug=True)