vobecant committed · Commit f3c703d · Parent: 004843f

Initial commit.

Files changed:
- app.py (+2, -8)
- examples/cs1.jpg (+0, -0)
- examples/cs2.jpg (+0, -0)
- examples/cs3.jpg (+0, -0)
- examples/cs4.jpg (+0, -0)
- examples/cs5.jpg (+0, -0)
app.py CHANGED

@@ -4,12 +4,11 @@ import requests
 import torch
 import yaml
 from PIL import Image
-from torchvision import transforms
-
 from segmenter_model import utils
 from segmenter_model.factory import create_segmenter
 from segmenter_model.fpn_picie import PanopticFPN
 from segmenter_model.utils import colorize_one, map2cs
+from torchvision import transforms

 # WEIGHTS = './weights/segmenter.pth
 WEIGHTS = './weights/segmenter_nusc.pth'
@@ -155,18 +154,12 @@ def predict(input_img):
     input_img = transform(input_img_pil)
     input_img = torch.unsqueeze(input_img, 0)

-    print('Loaded and prepaded image.')
-
     with torch.no_grad():
         segmentation = segment_segmenter(input_img, model, window_size, window_stride).squeeze().detach()
-    print('Segmented image.')
     segmentation_remap = remap(segmentation)
-    print('Remapped image.')

     drawing_pseudo = colorize_one(segmentation_remap)
-    print('Pseudo colors done.')
     drawing_cs = map2cs(segmentation_remap)
-    print('CS colors done.')

     drawing_cs = transforms.ToPILImage()(drawing_cs).resize(input_img_pil.size)
     drawing_blend_cs = blend_images(input_img_pil, drawing_cs)
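The two helpers called in this hunk, segment_segmenter and blend_images, are defined elsewhere in app.py and are not part of this diff. Below is a minimal sketch of what they plausibly do, assuming standard sliding-window inference and PIL alpha blending; everything beyond the call sites visible above (argument order, averaging of overlaps, the alpha value) is an assumption, not the app's actual implementation.

import torch
from PIL import Image

def _anchors(size, window, stride):
    # Window offsets along one axis; always include the far edge so the
    # whole image is covered even when (size - window) % stride != 0.
    if size <= window:
        return [0]
    offsets = list(range(0, size - window + 1, stride))
    if offsets[-1] != size - window:
        offsets.append(size - window)
    return offsets

def segment_segmenter(im, model, window_size, window_stride):
    # Assumed sliding-window inference: score overlapping crops of the
    # (1, C, H, W) input, average the accumulated logits, take the argmax.
    _, _, H, W = im.shape
    logits, counts = None, torch.zeros(1, 1, H, W)
    for top in _anchors(H, window_size, window_stride):
        for left in _anchors(W, window_size, window_stride):
            crop = im[:, :, top:top + window_size, left:left + window_size]
            out = model(crop)  # (1, num_classes, h, w) logits for this crop
            if logits is None:
                logits = torch.zeros(1, out.shape[1], H, W)
            logits[:, :, top:top + window_size, left:left + window_size] += out
            counts[:, :, top:top + window_size, left:left + window_size] += 1
    return (logits / counts).argmax(dim=1)  # (1, H, W) label map

def blend_images(background, overlay, alpha=0.5):
    # Assumed overlay: alpha-blend the colorized map onto the input photo.
    overlay = overlay.convert('RGB').resize(background.size)
    return Image.blend(background.convert('RGB'), overlay, alpha)

The call site only requires that segment_segmenter return a tensor that becomes an (H, W) label map after .squeeze(), and that blend_images accept two PIL images, so the real code may differ in the details.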
@@ -181,6 +174,7 @@ title = "Drive&Segment"
 description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, I use the Segmenter model trained on nuScenes and with 256x256 patches (for the sake of speed).'
 # article = "<p style='text-align: center'><a href='TODO' target='_blank'>Project Page</a> | <a href='codelink' target='_blank'>Github</a></p>"
 examples = ['examples/img5.jpeg', 'examples/100.jpeg', 'examples/39076.jpeg', 'examples/img1.jpg', 'examples/snow1.jpg']
+examples += ['examples/cs{}.jpg'.format(i) for i in range(1, 6)]

 # predict(examples[0])

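This hunk configures the demo's title, description, and examples, but the Gradio wiring itself is outside the diff. A minimal sketch of the likely hookup, assuming Gradio's standard Interface API; the component types and the two image outputs (predict computes both a pseudo-color and a Cityscapes-color blend) are guesses, and the exact component classes vary by Gradio version.

import gradio as gr

# Hypothetical wiring; everything here except predict, title, description,
# and examples (all defined earlier in app.py) is an assumption.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type='pil'),       # predict() transforms a PIL image
    outputs=[gr.Image(), gr.Image()],  # e.g. pseudo- and CS-color blends
    title=title,
    description=description,
    examples=examples,
)
demo.launch()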
examples/cs1.jpg ADDED (binary image)
examples/cs2.jpg ADDED (binary image)
examples/cs3.jpg ADDED (binary image)
examples/cs4.jpg ADDED (binary image)
examples/cs5.jpg ADDED (binary image)