|
import gradio as gr |
|
import torch |
|
from sahi.prediction import ObjectPrediction |
|
from sahi.utils.cv import visualize_object_predictions, read_image |
|
from ultralyticsplus import YOLO, render_result |
|
|
|
# Example rows for the Gradio interface: one entry per input widget —
# [image file, model repo id, image size, confidence threshold, IOU threshold].
_EXAMPLE_MODEL = 'foduucom/web-form-ui-field-detection'
image_path = [
    ['test/web form.jpg', _EXAMPLE_MODEL, 640, 0.25, 0.45],
    ['test/web form2.jpg', _EXAMPLE_MODEL, 640, 0.25, 0.45],
]
|
|
|
|
|
def yolov8_inference(
    image: str = None,
    model_path: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run YOLOv8 inference on a single image and return a rendered result.

    Args:
        image: Path to the input image (Gradio passes a filepath string).
        model_path: Hugging Face repo id or local path of the YOLO model.
        image_size: Inference image size (pixels) forwarded to ``predict``.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IOU threshold used by non-maximum suppression.

    Returns:
        The image with detections rendered on it (as produced by
        ``ultralyticsplus.render_result``).
    """
    model = YOLO(model_path)
    # Thresholds are applied via model overrides rather than predict kwargs.
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    model.overrides['agnostic_nms'] = False
    model.overrides['max_det'] = 1000
    image = read_image(image)
    # Bug fix: image_size was previously accepted but never used — the
    # "Image Size" slider had no effect. Forward it to predict as imgsz.
    results = model.predict(image, imgsz=image_size)
    render = render_result(model=model, image=image, result=results[0])
    return render
|
|
|
|
|
# --- Gradio UI wiring -------------------------------------------------------
# Widget order must match the positional parameters of yolov8_inference
# (and the column order of the example rows).
_model_choices = ["foduucom/web-form-ui-field-detection"]

inputs = [
    gr.inputs.Image(type="filepath", label="Input Image"),
    gr.inputs.Dropdown(
        _model_choices,
        default="foduucom/web-form-ui-field-detection",
        label="Model",
    ),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

outputs = gr.outputs.Image(type="filepath", label="Output Image")

title = "Ui form : web form ui field Detection in Images"

# Single-tab app: the image-inference interface wrapped in a tabbed container.
interface_image = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=image_path,
    cache_examples=False,
    theme='huggingface',
)

demo = gr.TabbedInterface([interface_image], tab_names=['Image inference'])
demo.queue().launch()