Commit: "Update app.py" — changes to `app.py` (diff reconstructed below).
NOTE(review): the page extraction split the diff into separate before/after panes and
truncated two old-side string literals; they are marked with <…truncated…> placeholders
below rather than guessed. The new-side values were fully visible and are exact.

```diff
@@ -8,10 +8,10 @@ import tensorflow as tf
 from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
 
 feature_extractor = SegformerFeatureExtractor.from_pretrained(
-    "nvidia/segformer-<…old checkpoint id, truncated in extraction…>"
+    "nvidia/segformer-b2-finetuned-ade-512-512"
 )
 model = TFSegformerForSemanticSegmentation.from_pretrained(
-    "nvidia/segformer-<…old checkpoint id, truncated in extraction…>"
+    "nvidia/segformer-b2-finetuned-ade-512-512"
 )
 
 def ade_palette():
@@ -235,7 +235,7 @@ def sepia(input_img):
 demo = gr.Interface(fn=sepia,
                     inputs=gr.Image(shape=(400, 600)),
                     outputs=['plot'],
-                    examples=["<…old examples list, truncated in extraction…>"],
+                    examples=["kitchen.jpg", "bridge.jpg", "red.jpg", "livingroom.jpg"],
                     allow_flagging='never')
 
 
```