vobecant committed
Commit d05fd36
1 Parent(s): 2a88595

Initial commit.

Files changed (1)
  1. app.py  +4 -3
app.py CHANGED
@@ -12,7 +12,7 @@ from torchvision import transforms
 
 # WEIGHTS = './weights/segmenter.pth
 WEIGHTS = './weights/segmenter_nusc.pth'
-FULL = False
+FULL = True
 ALPHA = 0.5
 
 
@@ -171,8 +171,8 @@ def predict(input_img):
 
 
 title = "Drive&Segment"
-description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, I use the Segmenter model trained on nuScenes and with 256x256 patches (for the sake of speed).'
-# article = "<p style='text-align: center'><a href='TODO' target='_blank'>Project Page</a> | <a href='codelink' target='_blank'>Github</a></p>"
+description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, it uses the Segmenter model trained on nuScenes and with a simplified inference scheme (for the sake of speed).'
+article = "<p style='text-align: center'><a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | <a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>"
 examples = [ #'examples/img5.jpeg',
     'examples/100.jpeg',
     'examples/39076.jpeg',
@@ -183,6 +183,7 @@ examples += ['examples/cs{}.jpg'.format(i) for i in range(2, 5)]
 # predict(examples[0])
 
 iface = gr.Interface(predict, inputs=gr.inputs.Image(type='filepath'), title=title, description=description,
+                     article=article,
                      outputs=[gr.outputs.Image(label="Pseudo segmentation", type="pil"),
                               gr.outputs.Image(label="Mapping to Cityscapes", type="pil")],
                      examples=examples)
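
For reference, here is a minimal sketch of how the interface setup in app.py reads once this commit is applied. It assumes the Gradio 2.x-style `gr.inputs`/`gr.outputs` API that the file already uses; the `predict` stub, the abbreviated `description` string, and the shortened `examples` list are placeholders for parts of app.py not shown in the diff.

```python
import gradio as gr

# Constants as they stand after this commit (FULL was flipped from False to True).
# WEIGHTS = './weights/segmenter.pth'
WEIGHTS = './weights/segmenter_nusc.pth'
FULL = True
ALPHA = 0.5


def predict(input_img):
    # Placeholder: the real predict() is defined earlier in app.py and returns
    # two PIL images (the pseudo segmentation and its mapping to Cityscapes).
    raise NotImplementedError


title = "Drive&Segment"
description = ('Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic '
               'Segmentation of Urban Scenes via Cross-modal Distillation" ...')  # abbreviated here
article = ("<p style='text-align: center'>"
           "<a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | "
           "<a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>")

# Only the example images visible in the diff; app.py lists a few more.
examples = ['examples/100.jpeg', 'examples/39076.jpeg']
examples += ['examples/cs{}.jpg'.format(i) for i in range(2, 5)]

# Gradio 2.x-style interface: one image path in, two PIL images out,
# with the newly added `article` footer linking the project page and GitHub repo.
iface = gr.Interface(predict,
                     inputs=gr.inputs.Image(type='filepath'),
                     title=title, description=description,
                     article=article,
                     outputs=[gr.outputs.Image(label="Pseudo segmentation", type="pil"),
                              gr.outputs.Image(label="Mapping to Cityscapes", type="pil")],
                     examples=examples)
```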