import gradio as gr
import torch
import numpy as np

# Load the pretrained YOLOv5s model from the Ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)


def run_inference(image):
    # Convert the PIL image from Gradio into a NumPy array (RGB)
    image = np.array(image)

    # Run YOLOv5 inference
    results = model(image)

    # Detections as a pandas DataFrame (xmin, ymin, xmax, ymax, confidence, class, name)
    detections = results.pandas().xyxy[0]

    # Count detections per class name and format them as one "name: count" per line
    object_counts = detections['name'].value_counts()
    count_text = "\n".join(f"{obj}: {count}" for obj, count in object_counts.items())

    # Draw bounding boxes and labels on the image. results.render() returns images
    # in the same channel order as the input (RGB here), so no BGR-to-RGB
    # conversion is needed before handing the array back to Gradio.
    annotated_image = results.render()[0]

    return annotated_image, count_text


interface = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="numpy", label="Annotated Image"),
        gr.Textbox(label="Object Counts", lines=5, interactive=False),
    ],
    title="YOLOv5 Object Detection with Counts",
    description="Upload an image to run YOLOv5 object detection, see the annotated results, and view the count of detected objects by category.",
)

interface.launch()