# YOLOv8 instance-segmentation Gradio demo: returns labels and mask polygons as JSON.
import gradio as gr
import sahi
import torch
from ultralyticsplus import YOLO

# Example images shown in the demo's `examples` gallery, keyed by the local
# filename they are saved under.
_EXAMPLE_IMAGE_URLS = {
    "highway.jpg": "https://raw.githubusercontent.com/kadirnar/dethub/main/data/images/highway.jpg",
    "small-vehicles1.jpeg": "https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg",
    "zidane.jpg": "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/zidane.jpg",
}
for _filename, _url in _EXAMPLE_IMAGE_URLS.items():
    sahi.utils.file.download_from_url(_url, _filename)
# Available YOLOv8 segmentation checkpoints, smallest (n) to largest (x).
model_names = [f"yolov8{size}-seg.pt" for size in "nsmlx"]

# The currently loaded checkpoint; `yolov8_inference` swaps the global model
# when the user selects a different one from the dropdown.
current_model_name = "yolov8m-seg.pt"
model = YOLO(current_model_name)  # downloads the weights on first use
def yolov8_inference(
    image=None,
    model_name: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run YOLOv8 segmentation and return a label + mask polygon per detection.

    Args:
        image: Input image — a file path, as produced by the
            ``gr.Image(type="filepath")`` component.
        model_name: Checkpoint name; the cached global model is reloaded
            only when this differs from the currently loaded one.
        image_size: Inference image size in pixels.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IOU threshold used by non-max suppression.

    Returns:
        A list of ``{"label": ..., "mask_coords": ...}`` dicts, one per
        detected object (empty when nothing is detected).
    """
    global model
    global current_model_name

    # Swap the cached model only when the user picked a different checkpoint,
    # so repeated calls don't pay the model-load cost every time.
    if model_name != current_model_name:
        model = YOLO(model_name)
        current_model_name = model_name

    # Apply the UI-selected thresholds to the loaded model.
    model.overrides["conf"] = conf_threshold
    model.overrides["iou"] = iou_threshold

    # NOTE(review): the original passed `return_outputs=True`, which is not a
    # valid `YOLO.predict` argument — `predict` already returns the list of
    # Results objects that this loop iterates.
    results = model.predict(image, imgsz=image_size)

    output = []
    for result in results:
        # `masks` is None when no objects were detected in this image;
        # without this guard `.xy` would raise AttributeError.
        if result.masks is None:
            continue
        for mask, box in zip(result.masks.xy, result.boxes):
            label = model.names[int(box.cls[0])]
            mask_coords = mask.tolist()  # polygon vertices as [[x, y], ...]
            output.append({"label": label, "mask_coords": mask_coords})
    return output
# --- Gradio UI definition -------------------------------------------------

# Input widgets, in the same order as `yolov8_inference`'s parameters.
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(
        model_names,
        value=current_model_name,
        label="Model type",
    ),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(
        minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
    ),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]

# The inference function returns plain dicts, so JSON output is the natural fit.
outputs = gr.JSON(label="Detected Objects with Masks and Labels")

title = "Ultralytics YOLOv8 Segmentation Demo"

# Example rows: [image path, model name, image size, confidence, IOU].
examples = [
    ["zidane.jpg", "yolov8m-seg.pt", 640, 0.6, 0.45],
    ["highway.jpg", "yolov8m-seg.pt", 640, 0.25, 0.45],
    ["small-vehicles1.jpeg", "yolov8m-seg.pt", 640, 0.25, 0.45],
]
# Assemble the interface and launch it with request queueing enabled.
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,  # pre-computes example outputs at startup
    theme="default",
)
demo_app.queue().launch(debug=True)