import numpy as np
import gradio as gr
import torch
from torch import nn
from transformers import SegformerForSemanticSegmentation, SegformerFeatureExtractor
#extractor = AutoFeatureExtractor.from_pretrained("andresgtn/segformer-b0-finetuned-ade-64-64-finetuned-semantic-sidewalk")
# load the default SegFormer feature extractor and the fine-tuned segmentation model
extractor = SegformerFeatureExtractor()
model = SegformerForSemanticSegmentation.from_pretrained("andresgtn/segformer-b0-finetuned-ade-64-64-finetuned-semantic-sidewalk")
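# Hedged note (library defaults, not set explicitly by this script): SegformerFeatureExtractor
# resizes inputs to 512x512, and SegFormer returns logits at 1/4 of that resolution,
# i.e. (1, num_labels, 128, 128), which is why they are upsampled below before display.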
def rescale_output_image(logits, image):
    # upsample the low-resolution logits back to the input image size
    upsampled_logits = nn.functional.interpolate(
        logits,
        size=image.shape[:2],  # (height, width)
        mode='bilinear',
        align_corners=False
    )
    # per-pixel class index for the single image in the batch
    pred_seg = upsampled_logits.argmax(dim=1)[0]
    return pred_seg
# classify function: run the model on an input image and return a color-coded segmentation mask
def classify(im):
    inputs = extractor(images=im, return_tensors="pt")  # .to("cuda") if running on GPU
    outputs = model(**inputs)
    logits = outputs.logits
    classes = rescale_output_image(logits, im).detach().numpy()
    # simple palette; assumes the predicted class indices stay within len(colors)
    colors = np.array([[128, 0, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 0, 0]])
    return colors[classes]
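# Hypothetical local check (not part of the Space itself): call classify() directly on a
# NumPy image; the file name below is an assumption for illustration only.
#
#   from PIL import Image
#   im = np.array(Image.open("sidewalk.png").convert("RGB"))
#   mask = classify(im)  # (H, W, 3) array of palette colors
#   Image.fromarray(mask.astype(np.uint8)).save("mask.png")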
# sample images
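# Each entry in `examples` is a list with one value per input component; here the
# single gr.Image() input takes a sample image URL.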
sample_images = [
    ["https://s3.amazonaws.com/moonup/production/uploads/1664719956531-611f9702593efbee33a4f7c9.png"],
    ["https://s3.amazonaws.com/moonup/production/uploads/1664719956737-611f9702593efbee33a4f7c9.png"],
]
# define the Gradio interface
title = "Semantic segmentation on sidewalk images"
description = "Drop an image of a sidewalk"
interface = gr.Interface(classify, gr.Image(), 'image', examples=sample_images,
                         description=description, title=title)
interface.launch(debug=False)