import gradio as gr
import numpy as np
import torch

# Load the YOLOv5 model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
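
# Optional (sketch): the hub model's AutoShape wrapper exposes tunable
# thresholds; uncomment to trade recall for precision. Values shown are
# illustrative, not part of the original app.
# model.conf = 0.25  # confidence threshold
# model.iou = 0.45   # NMS IoU threshold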

# Run inference on an image and count the detected objects
def run_inference(image):
    # Convert the PIL image to a NumPy array (RGB, which the hub model expects)
    image = np.array(image)
    # Run YOLOv5 inference
    results = model(image)
    # Extract detections as a pandas DataFrame (one row per bounding box)
    detections = results.pandas().xyxy[0]
    # Count detected objects by class name
    object_counts = detections['name'].value_counts()
    # Format the counts as one "name: count" line per class
    count_text = "\n".join(f"{obj}: {count}" for obj, count in object_counts.items())
    if not count_text:
        count_text = "No objects detected"
    # render() draws the boxes and returns the annotated array; it is already
    # in the same color space as the input (RGB), so no conversion is needed
    annotated_image = results.render()[0]
    return annotated_image, count_text
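
# Example (sketch): the function can also be exercised outside the UI;
# the file name below is hypothetical.
#   from PIL import Image
#   annotated, counts = run_inference(Image.open("sample.jpg"))
#   print(counts)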

# Create the Gradio interface with a custom-styled UI
interface = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="numpy", label="Annotated Image"),
        gr.Textbox(label="Object Counts", lines=5, interactive=False),
    ],
    title="Detectify: Object Detection with Counts",
    description="Upload an image and let Detectify detect and count the objects in it. You get annotated results with bounding boxes and an instant count of objects by category. Fast, accurate, and easy to use!",
css="""
/* General body and background settings */
body {
font-family: 'Arial', sans-serif;
background: linear-gradient(135deg, #FF6F61, #FF9F9F, #FFEB3B);
animation: gradientBG 5s ease infinite;
margin: 0;
padding: 0;
color: white;
height: 100vh;
display: flex;
justify-content: center;
align-items: center;
text-align: center;
overflow-y: auto;
}
@keyframes gradientBG {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
    /* Main container styles */
    .gradio-container {
        background: rgba(0, 0, 0, 0.5);
        border-radius: 20px;
        padding: 30px;
        width: 100%;
        max-width: 800px;
        box-shadow: 0 8px 15px rgba(0, 0, 0, 0.5);
        overflow-y: auto;
        animation: fadeIn 1s ease-out;
    }
    /* Fade-in effect */
    @keyframes fadeIn {
        from { opacity: 0; }
        to { opacity: 1; }
    }
    /* Header styling */
    .gradio-header {
        font-size: 2.5rem;
        font-weight: bold;
        color: #FFEB3B;
    }
    /* Description styling */
    .gradio-description {
        font-size: 1.2rem;
        color: #ffffff;
        margin-top: 10px;
        font-style: italic;
        max-width: 700px;
        margin-left: auto;
        margin-right: auto;
    }
    /* Button styling with hover and active effects */
    .gr-button {
        background: linear-gradient(90deg, #4CAF50, #FFC107);
        color: white;
        padding: 1rem 2rem;
        font-size: 1.2rem;
        border-radius: 12px;
        border: none;
        cursor: pointer;
        transition: transform 0.3s ease, background 0.3s ease, box-shadow 0.3s ease;
        box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
        margin-top: 20px;
        width: 200px; /* fixed width for consistency */
    }
    .gr-button:hover {
        background: linear-gradient(90deg, #FFC107, #4CAF50);
        transform: scale(1.05);
        box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
    }
    .gr-button:active {
        background: linear-gradient(90deg, #4CAF50, #FFC107);
        transform: scale(1.1);
        box-shadow: 0 10px 25px rgba(0, 0, 0, 0.4);
    }
    /* Image container styling */
    .gr-image-container {
        margin-top: 20px;
        border-radius: 15px;
        box-shadow: 0 5px 10px rgba(0, 0, 0, 0.3);
    }
    /* Textbox styling */
    .gr-textbox {
        background-color: #333;
        color: #FFEB3B;
        border: none;
        padding: 10px;
        border-radius: 10px;
        font-size: 1rem;
        width: 100%;
        text-align: left;
    }
    .gr-textbox:focus {
        outline: none;
        border: 2px solid #FF6F61;
    }
    /* Button text formatting (selector matched to the .gr-button rules above) */
    .gr-button {
        text-transform: uppercase;
    }
    /* Mobile responsiveness */
    @media screen and (max-width: 768px) {
        .gradio-container {
            padding: 15px;
            width: 90%;
        }
        .gradio-header {
            font-size: 2rem;
        }
        .gr-button {
            width: 100%;
        }
    }
    /* Desktop and larger screen adjustments */
    @media screen and (min-width: 1024px) {
        .gr-button {
            width: 250px;
        }
    }
    """,
)
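
# Optional (sketch): launch() also accepts kwargs such as share=True, which
# creates a temporary public link, or server_name="0.0.0.0" to listen on all
# interfaces; the defaults are fine for local use and for Spaces.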

# Launch the app when this file is run as a script
if __name__ == "__main__":
    interface.launch()