vobecant committed
Commit: deeb0e8
Parent(s): bccc9c2
Initial commit.
app.py
CHANGED
@@ -182,17 +182,18 @@ def predict(input_img, cs_mapping):
 title = "Drive&Segment"
 description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, I use the Segmenter model trained on nuScenes and with 256x256 patches (for the sake of speed).'
 # article = "<p style='text-align: center'><a href='TODO' target='_blank'>Project Page</a> | <a href='codelink' target='_blank'>Github</a></p>"
-examples = [
-    ['examples/img1.jpg', False], ['examples/snow1.jpg',False]]
+examples = ['examples/img5.jpeg', 'examples/100.jpeg', 'examples/39076.jpeg', 'examples/img1.jpg', 'examples/snow1.jpg']
 
 # predict(examples[0])
 
-iface = gr.Interface(predict,
+iface = gr.Interface(predict, gr.inputs.Image(type='filepath'),
                      "image", title=title, description=description,
+                     outputs=[gr.outputs.Image(label="Pseudo segmentation", type="pil"),
+                              gr.outputs.Image(label="Mapping to Cityscapes", type="pil")],
                      examples=examples)
 # iface = gr.Interface(predict, gr.inputs.Image(type='filepath'),
 #                      "image", title=title, description=description,
 #                      examples=examples)
 
 # iface.launch(show_error=True, share=True)
-iface.launch(
+iface.launch(enable_queue=True, cache_examples=True)
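For reference, here is a minimal, self-contained sketch of the interface this commit sets up, written against the legacy Gradio 2.x gr.inputs / gr.outputs API that app.py uses. The predict stub and the single example path are placeholders for illustration only; the real predict() in this repo runs the Segmenter model and returns the pseudo segmentation and its Cityscapes mapping as PIL images. The sketch also passes outputs only as a keyword, since combining the positional "image" argument with an outputs= keyword (as in the committed call) would give Gradio two values for outputs.

import gradio as gr
from PIL import Image

# Placeholder for the real inference code in app.py; the actual predict()
# runs the Segmenter model (trained on nuScenes, 256x256 patches) on CPU.
def predict(input_img_path):
    img = Image.open(input_img_path)
    return img, img  # stand-in for (pseudo segmentation, Cityscapes mapping)

title = "Drive&Segment"
description = "Gradio demo for Drive&Segment (CPU-only inference)."

iface = gr.Interface(predict,
                     gr.inputs.Image(type='filepath'),
                     outputs=[gr.outputs.Image(label="Pseudo segmentation", type="pil"),
                              gr.outputs.Image(label="Mapping to Cityscapes", type="pil")],
                     title=title, description=description,
                     examples=['examples/img1.jpg'])

# The committed code also passes cache_examples=True to launch(); whether that
# argument is accepted there depends on the Gradio version pinned by the Space.
iface.launch(enable_queue=True)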