import gradio as gr
import numpy as np
import torch

# Load the pretrained YOLOv5s model from the Ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
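
# Optional tuning (illustrative values, not changes to the app's behavior):
# the hub model is an AutoShape wrapper that exposes thresholds which filter
# detections before they reach results.pandas().
# model.conf = 0.25  # confidence threshold: drop low-confidence detections
# model.iou = 0.45   # IoU threshold for non-maximum suppression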


# Run inference on an image and count the detected objects
def run_inference(image):
    # Convert the PIL image to a NumPy array; PIL arrays are RGB, which
    # the YOLOv5 hub model accepts directly
    image = np.array(image)
    # Run YOLOv5 inference
    results = model(image)
    # Extract detections as a pandas DataFrame (xmin, ymin, xmax, ymax, confidence, class, name)
    detections = results.pandas().xyxy[0]
    # Count objects by category name
    object_counts = detections['name'].value_counts()
    # Format the counts as one "name: count" line per category
    count_text = "\n".join(f"{obj}: {count}" for obj, count in object_counts.items())
    if not count_text:
        count_text = "No objects detected"
    # results.render() draws the boxes in place and returns the annotated
    # images; the array is already RGB (it came from PIL), so the original
    # BGR-to-RGB conversion would only have swapped the color channels
    annotated_image = results.render()[0]
    return annotated_image, count_text
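
# Quick sanity check outside Gradio (hypothetical local file; adjust the path):
# from PIL import Image
# annotated, counts = run_inference(Image.open('test.jpg'))
# print(counts)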


# Create the Gradio interface with enhanced UI
interface = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil"),
        gr.Textbox(label="Object Counts", lines=5, interactive=False),  # display counts as text
    ],
    title="YOLOv5 Object Detection with Counts",
    description="Upload an image to run YOLOv5 object detection, see the annotated results, and view the count of detected objects by category.",
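    # The CSS below targets Gradio's default container/button/textbox class
    # names; these selectors are version-dependent and may need updating for
    # newer Gradio releases.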
css="""
body {
font-family: 'Arial', sans-serif;
background: linear-gradient(135deg, #FF6F61, #FF9F9F, #FFEB3B);
animation: gradientBG 5s ease infinite;
margin: 0;
padding: 0;
color: white;
height: 100vh;
display: flex;
justify-content: center;
align-items: center;
text-align: center;
}
@keyframes gradientBG {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
.gradio-container {
background: rgba(0, 0, 0, 0.5);
border-radius: 20px;
padding: 20px;
width: 90%;
max-width: 700px;
box-shadow: 0 8px 15px rgba(0, 0, 0, 0.5);
}
.gradio-header {
font-size: 2.5rem;
font-weight: bold;
color: #FFEB3B;
}
.gradio-description {
font-size: 1.2rem;
color: #ffffff;
margin-top: 10px;
font-style: italic;
}
.gr-button {
background: linear-gradient(90deg, #FF6F61, #FF9F9F);
color: white;
padding: 1rem 2rem;
font-size: 1.1rem;
border-radius: 10px;
border: none;
cursor: pointer;
transition: transform 0.3s ease, background 0.3s ease;
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
}
.gr-button:hover {
background: linear-gradient(90deg, #FF9F9F, #FF6F61);
transform: scale(1.05);
}
.gr-button:active {
background: linear-gradient(90deg, #FF6F61, #FF9F9F);
transform: scale(1.1);
}
.gr-image-container {
margin-top: 20px;
border-radius: 15px;
box-shadow: 0 5px 10px rgba(0, 0, 0, 0.3);
}
.gr-textbox {
background-color: #333;
color: #FFEB3B;
border: none;
padding: 10px;
border-radius: 10px;
font-size: 1rem;
width: 100%;
text-align: left;
}
.gr-textbox:focus {
outline: none;
border: 2px solid #FF6F61;
}
"""
)
# Launch the app
interface.launch()
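
# Note: launch() also accepts standard Gradio options such as share=True
# (to create a temporary public link) or server_name="0.0.0.0" (to serve
# on the local network); both are left at their defaults here.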