import gradio as gr
import torch
from ultralytics import YOLO
import cv2
import numpy as np
# Load the trained YOLO model
model = YOLO("yolo11n.pt") # Ensure this matches the model file in your repo
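# Note: "yolo11n.pt" is the general-purpose Ultralytics YOLO11 nano checkpoint
# pretrained on COCO. For fruit-specific detection you would typically swap in
# your own fine-tuned weights instead, e.g. (hypothetical path):
# model = YOLO("runs/detect/train/weights/best.pt")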
# Define the function for making predictions
def predict(image):
    # Convert the incoming PIL image to OpenCV's BGR format
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Run YOLO inference
    results = model(img)

    detected_objects = []  # Store detected object names and confidence scores

    # Draw bounding boxes and labels on the image
    for r in results:
        for box in r.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding box coordinates
            conf = float(box.conf[0])  # Confidence score
            cls = int(box.cls[0])  # Class index
            label = f"{model.names[cls]} {conf:.2f}"
            detected_objects.append(label)  # Save object name and confidence

            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(img, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Convert back to RGB and return the annotated image plus the detection list
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB), "\n".join(detected_objects) if detected_objects else "No objects detected."
# Create the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Image(type="pil"), gr.Textbox(label="Detected Objects")],
    title="YOLO Fruit Detection",
    description="Upload an image and the model will detect objects and list them.",
)
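# Note: with type="pil" on the input, Gradio passes the uploaded image to
# predict() as a PIL.Image; the output Image component also accepts the NumPy
# array returned by predict(), so no explicit PIL conversion is needed on the
# way out.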
# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
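# Note: when deployed as a Hugging Face Space, the packages imported above
# (gradio, torch, ultralytics, opencv-python, numpy) would typically be listed
# in a requirements.txt alongside this app file.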