import gradio as gr
import numpy as np
import torch

# Load the pretrained YOLOv5s model from the Ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Function to run inference on an image and count detected objects
def run_inference(image):
    # Convert the image from PIL format to a NumPy array (RGB) for the model
    image = np.array(image)

    # Run YOLOv5 inference
    results = model(image)

    # Extract detection results as a pandas DataFrame
    detections = results.pandas().xyxy[0]

    # Count objects by category
    object_counts = detections['name'].value_counts()

    # Create a formatted string to show object counts
    count_text = "\n".join([f"{obj}: {count}" for obj, count in object_counts.items()])
    if not count_text:
        count_text = "No objects detected"

    # results.render() draws the detections onto the input image and returns it.
    # The array is already RGB (it came from a PIL image), so no BGR-to-RGB
    # conversion is needed before display.
    annotated_image = results.render()[0]

    return annotated_image, count_text

# Create the Gradio interface with enhanced UI
interface = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="Annotated Image"),
        gr.Textbox(label="Object Counts", lines=5, interactive=False),  # Display counts as text
    ],
    title="YOLOv5 Object Detection with Counts",
    description="Upload an image to run YOLOv5 object detection, see the annotated results, and view the count of detected objects by category.",
    css="""
    body {
        font-family: 'Arial', sans-serif;
        background: linear-gradient(135deg, #FF6F61, #FF9F9F, #FFEB3B);
        background-size: 300% 300%;  /* required so the gradient animation is visible */
        animation: gradientBG 5s ease infinite;
        margin: 0;
        padding: 0;
        color: white;
        height: 100vh;
        display: flex;
        justify-content: center;
        align-items: center;
        text-align: center;
    }
    @keyframes gradientBG {
        0% { background-position: 0% 50%; }
        50% { background-position: 100% 50%; }
        100% { background-position: 0% 50%; }
    }
    .gradio-container {
        background: rgba(0, 0, 0, 0.5);
        border-radius: 20px;
        padding: 20px;
        width: 90%;
        max-width: 700px;
        box-shadow: 0 8px 15px rgba(0, 0, 0, 0.5);
    }
    .gradio-header {
        font-size: 2.5rem;
        font-weight: bold;
        color: #FFEB3B;
    }
    .gradio-description {
        font-size: 1.2rem;
        color: #ffffff;
        margin-top: 10px;
        font-style: italic;
    }
    .gr-button {
        background: linear-gradient(90deg, #FF6F61, #FF9F9F);
        color: white;
        padding: 1rem 2rem;
        font-size: 1.1rem;
        border-radius: 10px;
        border: none;
        cursor: pointer;
        transition: transform 0.3s ease, background 0.3s ease;
        box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
    }
    .gr-button:hover {
        background: linear-gradient(90deg, #FF9F9F, #FF6F61);
        transform: scale(1.05);
    }
    .gr-button:active {
        background: linear-gradient(90deg, #FF6F61, #FF9F9F);
        transform: scale(1.1);
    }
    .gr-image-container {
        margin-top: 20px;
        border-radius: 15px;
        box-shadow: 0 5px 10px rgba(0, 0, 0, 0.3);
    }
    .gr-textbox {
        background-color: #333;
        color: #FFEB3B;
        border: none;
        padding: 10px;
        border-radius: 10px;
        font-size: 1rem;
        width: 100%;
        text-align: left;
    }
    .gr-textbox:focus {
        outline: none;
        border: 2px solid #FF6F61;
    }
    """
)

# Launch the app
interface.launch()
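# ---------------------------------------------------------------------------
# Usage note (a sketch, not part of the app itself): the commands below assume
# the standard PyPI package names and that this script is saved as app.py;
# adjust both to your environment.
#
#   pip install gradio torch torchvision opencv-python pandas numpy
#   python app.py
#
# torch.hub.load fetches the ultralytics/yolov5 repository and the yolov5s
# weights on first run, so an internet connection is needed the first time.
# ---------------------------------------------------------------------------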