Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -35,7 +35,7 @@ def yoloe_inference(image, prompts, target_image, model_id, image_size, conf_thr
|
|
35 |
)
|
36 |
if target_image:
|
37 |
model.predict(source=image, imgsz=image_size, conf=conf_thresh, iou=iou_thresh, return_vpe=True, **kwargs)
|
38 |
-
model.set_classes(["
|
39 |
model.predictor = None # unset VPPredictor
|
40 |
image = target_image
|
41 |
kwargs = {}
|
@@ -233,6 +233,11 @@ with gradio_app:
|
|
233 |
We introduce **YOLOE(ye)**, a highly **efficient**, **unified**, and **open** object detection and segmentation model, like the human eye, under different prompt mechanisms, such as *texts*, *visual inputs*, and the *prompt-free paradigm*.
|
234 |
"""
|
235 |
)
|
|
|
|
|
|
|
|
|
|
|
236 |
with gr.Row():
|
237 |
with gr.Column():
|
238 |
app()
|
|
|
35 |
)
|
36 |
if target_image:
|
37 |
model.predict(source=image, imgsz=image_size, conf=conf_thresh, iou=iou_thresh, return_vpe=True, **kwargs)
|
38 |
+
model.set_classes(["object0"], model.predictor.vpe)
|
39 |
model.predictor = None # unset VPPredictor
|
40 |
image = target_image
|
41 |
kwargs = {}
|
|
|
233 |
We introduce **YOLOE(ye)**, a highly **efficient**, **unified**, and **open** object detection and segmentation model, like the human eye, under different prompt mechanisms, such as *texts*, *visual inputs*, and the *prompt-free paradigm*.
|
234 |
"""
|
235 |
)
|
236 |
+
gr.Markdown(
|
237 |
+
"""
|
238 |
+
If desired objects are not identified, please set a **smaller** confidence threshold, e.g., for visual prompts with handcrafted shapes or cross-image prompts.
|
239 |
+
"""
|
240 |
+
)
|
241 |
with gr.Row():
|
242 |
with gr.Column():
|
243 |
app()
|