Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -239,10 +239,22 @@ def sepia(input_img):
OLD (before this commit):

239      fig = draw_plot(pred_img, seg)
240      return fig
241
242  demo = gr.Interface(sepia,
243                      gr.inputs.Image(type="filepath"),
244                      outputs=['plot'],
245                      examples=["ADE_val_00000001.jpeg"],
246 -                    allow_flagging='never')
247
248  demo.launch()
|
NEW (after this commit):

239      fig = draw_plot(pred_img, seg)
240      return fig
241
242 + title = "SegFormer(ADE20k) in TensorFlow"
243 + description = """
244 +
245 + This is demo TensorFlow SegFormer from 🤗 `transformers` official package. The pre-trained model is optimized to segment scene specific images. We are currently using ONNX model converted from the TensorFlow based SegFormer to improve the latency. The average latency of an inference is 21 and 8 seconds for TensorFlow and ONNX converted models respectively (in Colab).
246 +
247 + """
248 +
249 + article = "Check out the [repository](https://github.com/deep-diver/segformer-tf-transformers) to find out how to make inference, finetune the model with custom dataset, and further information."
250 +
251  demo = gr.Interface(sepia,
252                      gr.inputs.Image(type="filepath"),
253                      outputs=['plot'],
254                      examples=["ADE_val_00000001.jpeg"],
255 +                    allow_flagging='never',
256 +                    title=title,
257 +                    description=description,
258 +                    article=article)
259
260  demo.launch()