import gradio as gr
import torch
from ultralyticsplus import YOLO, render_result
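# NOTE: this script assumes gradio, torch, and ultralyticsplus are installed
# (versions are not pinned here); ultralyticsplus wraps Ultralytics YOLOv8 and
# provides the render_result helper used below.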
torch.hub.download_url_to_file(
    'https://cdn.theatlantic.com/thumbor/xoh2WVVSx4F2uboG9xbT5BDprtM=/0x0:4939x2778/960x540/media/img/mt/2023/11/LON68717_copy/original.jpg',
    'one.jpg')
torch.hub.download_url_to_file(
    'https://i.ytimg.com/vi/lZQX2mmLo2s/maxresdefault.jpg',
    'two.jpg')
torch.hub.download_url_to_file(
    'https://assets.bwbx.io/images/users/iqjWHBFdfxIU/ioQgA.854d7s/v1/-1x-1.jpg',
    'three.jpg')
torch.hub.download_url_to_file(
    'https://cdn.apartmenttherapy.info/image/upload/f_jpg,q_auto:eco,c_fill,g_auto,w_1500,ar_1:1/at%2Fhouse%20tours%2Farchive%2FTour%20a%20Colorful%20Home%20in%20Montreal%2Ffada199d36b084830ef3563b555887f31851ca55',
    'four.jpg')
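# The four downloads above provide local image files for the `examples` gallery
# that is wired into gr.Interface further down.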
def yoloV8_func(image: gr.Image = None,
                image_size: gr.Slider = 640,
                conf_threshold: gr.Slider = 0.4,
                iou_threshold: gr.Slider = 0.50):
    """Run YOLOv8 object detection on the given image.

    Args:
        image: path to the input image (the gr.Image input uses type="filepath").
        image_size: inference image size in pixels.
        conf_threshold: minimum confidence score for a detection to be kept.
        iou_threshold: IoU threshold used for non-maximum suppression.

    Returns:
        The input image rendered with bounding boxes around detected objects.
    """
    # Load the custom YOLOv8 model from the 'YOLO-best.pt' checkpoint
    model_path = "YOLO-best.pt"
    model = YOLO(model_path)

    # Perform object detection on the input image using the YOLOv8 model
    results = model.predict(image,
                            conf=conf_threshold,
                            iou=iou_threshold,
                            imgsz=image_size)

    # Print the detected objects' information (class, coordinates, and probability)
    box = results[0].boxes
    print("Object type:", box.cls)
    print("Coordinates:", box.xyxy)
    print("Probability:", box.conf)

    # Render the output image with bounding boxes around detected objects
    render = render_result(model=model, image=image, result=results[0],
                           rect_th=4, text_th=4)
    return render
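# Minimal sketch of calling the function directly (outside Gradio), assuming the
# YOLO-best.pt checkpoint sits in the working directory and 'one.jpg' was
# downloaded above; render_result returns a PIL image, so it can be saved to disk:
#
#   annotated = yoloV8_func("one.jpg", image_size=640,
#                           conf_threshold=0.25, iou_threshold=0.45)
#   annotated.save("one_annotated.jpg")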
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640,
              step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25,
              step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45,
              step=0.05, label="IOU Threshold"),
]
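# At runtime the slider values above are passed into yoloV8_func, so the defaults
# in its signature (0.4 / 0.50) only apply when the function is called directly.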
outputs = gr.Image(type="filepath", label="Output Image")
title = "YOLOv8 Custom Object Detection by Uyen Nguyen"
examples = [['one.jpg', 900, 0.5, 0.8],
            ['two.jpg', 1152, 0.05, 0.05],
            ['three.jpg', 1024, 0.25, 0.25],
            ['four.jpg', 832, 0.3, 0.3]]
yolo_app = gr.Interface(
    fn=yoloV8_func,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
)
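# cache_examples=True makes Gradio run yoloV8_func on every example at startup
# and store the rendered outputs, so the YOLO-best.pt checkpoint must be present
# when the app builds or it will fail before it ever launches.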
# Launch the Gradio interface in debug mode with a public share link
yolo_app.launch(debug=True, share=True)