vobecant committed · Commit 03d409b · 1 Parent(s): d37387f

Initial commit.
app.py CHANGED

@@ -13,6 +13,7 @@ from torchvision import transforms
 # WEIGHTS = './weights/segmenter.pth
 WEIGHTS = './weights/segmenter_nusc.pth'
 FULL = True
+CACHE = False
 ALPHA = 0.5


@@ -170,10 +171,10 @@ def predict(input_img):
 return drawing_blend_pseudo, drawing_blend_cs


-title = "Drive&Segment
+title = '<h1 align="center">Drive&Segment</h1>'
 description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, it uses the Segmenter model trained on nuScenes and with a simplified inference scheme (for the sake of speed).'
 # article = "<p style='text-align: center'><a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | <a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>"
-article="""
+article = """
 <h1 align="center">🚙📷 Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation</h1>

 <h2 align="center">
@@ -202,21 +203,21 @@ the raw non-curated data collected by cars which, equipped with 📷 cameras and

 Example of **pseudo** segmentation.

+

 ### Cityscapes segmentation.

 Two examples of pseudo segmentation mapped to the 19 ground-truth classes of the Cityscapes dataset by using Hungarian
 algorithm.

+
+
 """
+examples = [ # 'examples/img5.jpeg',
+'examples/100.jpeg',
+'examples/39076.jpeg',
+'examples/img1.jpg',
+'examples/snow1.jpg']
 examples += ['examples/cs{}.jpg'.format(i) for i in range(2, 5)]

 # predict(examples[0])
@@ -231,4 +232,4 @@ iface = gr.Interface(predict, inputs=gr.inputs.Image(type='filepath'), title=tit
 # examples=examples)

 # iface.launch(show_error=True, share=True)
-iface.launch(enable_queue=True, cache_examples=
+iface.launch(enable_queue=True, cache_examples=CACHE)
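
The last hunk header only shows the beginning of the gr.Interface call (iface = gr.Interface(predict, inputs=gr.inputs.Image(type='filepath'), title=tit...). A minimal sketch of how the values touched by this commit plug into the Gradio 2.x-era API; the outputs list and the remaining keyword arguments are assumptions, not read from the diff, and predict, title, description, article are the definitions earlier in app.py:

import gradio as gr

CACHE = False  # added by this commit and passed to launch() below

# Sketch only: predict() returns two blended images (pseudo and Cityscapes),
# so two image outputs are assumed; the actual outputs argument is not visible
# in the diff. The examples argument stays commented out, as in the committed file.
iface = gr.Interface(predict,
                     inputs=gr.inputs.Image(type='filepath'),
                     outputs=['image', 'image'],  # assumed
                     title=title,
                     description=description,
                     article=article)
                     # examples=examples)

iface.launch(enable_queue=True, cache_examples=CACHE)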
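The article text mentions mapping pseudo labels to the 19 ground-truth Cityscapes classes with the Hungarian algorithm. That code is not part of this commit; purely as a generic illustration of the technique named there (function name, inputs, and the overlap-matrix construction are assumptions, not the repository's implementation):

import numpy as np
from scipy.optimize import linear_sum_assignment

def match_pseudo_to_cityscapes(pseudo, gt, n_pseudo, n_gt=19):
    # pseudo, gt: flat integer label arrays of equal length.
    # Overlap (confusion) matrix between pseudo labels and ground-truth classes.
    overlap = np.zeros((n_pseudo, n_gt), dtype=np.int64)
    valid = gt < n_gt  # skip void / ignore pixels
    np.add.at(overlap, (pseudo[valid], gt[valid]), 1)
    # Hungarian matching maximizes total overlap (negate to get a cost matrix).
    rows, cols = linear_sum_assignment(-overlap)
    mapping = np.zeros(n_pseudo, dtype=np.int64)
    mapping[rows] = cols  # pseudo classes left unmatched keep class 0
    return mapping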