vobecant committed
Commit 03d409b · 1 Parent(s): d37387f

Initial commit.

Files changed (1): app.py (+12 -11)
app.py CHANGED

```diff
@@ -13,6 +13,7 @@ from torchvision import transforms
 # WEIGHTS = './weights/segmenter.pth
 WEIGHTS = './weights/segmenter_nusc.pth'
 FULL = True
+CACHE = False
 ALPHA = 0.5
 
 
@@ -170,10 +171,10 @@ def predict(input_img):
     return drawing_blend_pseudo, drawing_blend_cs
 
 
-title = "Drive&Segment"
+title = '<h1 align="center">Drive&Segment</h1>'
 description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, it uses the Segmenter model trained on nuScenes and with a simplified inference scheme (for the sake of speed).'
 # article = "<p style='text-align: center'><a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | <a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>"
-article="""
+article = """
 <h1 align="center">🚙📷 Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation</h1>
 
 <h2 align="center">
@@ -202,21 +203,21 @@ the raw non-curated data collected by cars which, equipped with 📷 cameras and
 
 Example of **pseudo** segmentation.
 
-![](https://github.com/vobecant/DriveAndSegment/blob/main/sources/video128_blend03_v2_10fps_640px_lanczos.gif)
+![](https://drive.google.com/uc?export=view&id=1xB_JdIL2bC6ka2hbs0wgXYtLpb1r07hw)
 
 ### Cityscapes segmentation.
 
 Two examples of pseudo segmentation mapped to the 19 ground-truth classes of the Cityscapes dataset by using Hungarian
 algorithm.
 
-![](https://github.com/vobecant/DriveAndSegment/blob/main/sources/video_stuttgart00_remap_blended03_20fps_crop.gif)
-![](https://github.com/vobecant/DriveAndSegment/blob/main/sources/video_stuttgart01_remap_blended03_20fps_crop2.gif)
+![](https://drive.google.com/uc?export=view&id=1vHF2DugjXr4FdXX3gW65GRPArNL5urEH)
+![](https://drive.google.com/uc?export=view&id=1WI_5lmF_YoVFXdWDnPT29rhPnlylh7QV)
 """
-examples = [ #'examples/img5.jpeg',
-    'examples/100.jpeg',
-    'examples/39076.jpeg',
-    'examples/img1.jpg',
-    'examples/snow1.jpg']
+examples = [ # 'examples/img5.jpeg',
+    'examples/100.jpeg',
+    'examples/39076.jpeg',
+    'examples/img1.jpg',
+    'examples/snow1.jpg']
 examples += ['examples/cs{}.jpg'.format(i) for i in range(2, 5)]
 
 # predict(examples[0])
@@ -231,4 +232,4 @@ iface = gr.Interface(predict, inputs=gr.inputs.Image(type='filepath'), title=tit
 # examples=examples)
 
 # iface.launch(show_error=True, share=True)
-iface.launch(enable_queue=True, cache_examples=FULL)
+iface.launch(enable_queue=True, cache_examples=CACHE)
```
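
In net effect, the commit introduces a CACHE flag and passes it to launch() instead of FULL, wraps the title in an HTML heading, re-enables the bundled example images, and swaps the article's GIFs from GitHub blob URLs to Google Drive uc?export=view&id=... links, presumably because the blob URLs point at GitHub's HTML page rather than the raw GIF and therefore do not render inside the Gradio article markdown. As a rough guide to the wiring the diff leaves behind, here is a minimal sketch written against a recent Gradio API (gr.Image instead of the older gr.inputs.Image, cache_examples on Interface, and .queue() in place of launch(enable_queue=True)); the stub predict() body and the output labels are assumptions, while the constants, example paths, and text strings come from the diff above.

```python
# Minimal sketch of the app-level wiring after this commit, assuming a recent
# Gradio release. The real app.py runs the Segmenter model loaded from WEIGHTS;
# the stub predict() below is only a placeholder so the sketch is self-contained.
import gradio as gr

WEIGHTS = './weights/segmenter_nusc.pth'
FULL = True    # toggles the full inference scheme in the real app; unused here
CACHE = False  # new in this commit: do not pre-compute example outputs at startup
ALPHA = 0.5    # blending weight used when overlaying segmentations


def predict(input_img):
    # Placeholder: the real function returns the input blended with the pseudo
    # segmentation and with the Cityscapes-remapped segmentation.
    drawing_blend_pseudo, drawing_blend_cs = input_img, input_img
    return drawing_blend_pseudo, drawing_blend_cs


title = '<h1 align="center">Drive&Segment</h1>'
description = ('Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic '
               'Segmentation of Urban Scenes via Cross-modal Distillation"')  # abbreviated
article = """(project links and example GIFs, as in the diff above)"""

# Example paths assume the repository's examples/ folder.
examples = [  # 'examples/img5.jpeg',
    'examples/100.jpeg',
    'examples/39076.jpeg',
    'examples/img1.jpg',
    'examples/snow1.jpg']
examples += ['examples/cs{}.jpg'.format(i) for i in range(2, 5)]

iface = gr.Interface(
    predict,
    inputs=gr.Image(type='filepath'),
    outputs=[gr.Image(label='pseudo segmentation'),
             gr.Image(label='Cityscapes-mapped segmentation')],
    title=title, description=description, article=article,
    examples=examples,
    cache_examples=CACHE,  # the flag introduced by this commit
)

iface.queue().launch()  # queueing replaces the older launch(enable_queue=True)
```

With CACHE left at False, the Space does not run predict() on every example at startup; that presumably matters here because the description warns that CPU-only inference can take up to 20 s per image. Flipping CACHE to True would instead pre-compute and cache the example outputs once at launch.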