import gradio as gr
import torch
import numpy as np

# Load the pretrained YOLOv5s model via torch.hub (weights are downloaded on first use).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
|
def run_inference(image):
    # Gradio passes the upload as a PIL image; convert it to a NumPy RGB array for YOLOv5.
    image = np.array(image)

    # Run inference. The hub model handles preprocessing, the forward pass, and NMS.
    results = model(image)

    # Detections as a DataFrame: one row per box with xyxy coords, confidence, class, and name.
    detections = results.pandas().xyxy[0]

    # Count detections per class name; cast to plain int so the values are JSON-serializable.
    object_counts = {name: int(count) for name, count in detections['name'].value_counts().items()}

    # render() draws boxes and labels onto the RGB input image and returns it still in RGB,
    # so no BGR-to-RGB conversion is needed before display.
    annotated_image = results.render()[0]

    return annotated_image, object_counts
|
interface = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil"),
        gr.JSON(label="Object Counts")
    ],
    title="YOLOv5 Object Detection with Counts",
    description="Upload an image to run YOLOv5 object detection, see the annotated results, and view the count of detected objects by category."
)

interface.launch()