import cv2
import numpy as np
import torch
import gradio as gr
from ultralytics import YOLO

# Load YOLOv12x model
MODEL_PATH = "yolov12x.pt"  # Ensure the model is uploaded to the Hugging Face Space
model = YOLO(MODEL_PATH)

# COCO dataset class ID for trucks
TRUCK_CLASS_ID = 7  # "truck"
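
# Optional sanity check (added for illustration, not part of the original app):
# `model.names` is the Ultralytics class-ID -> name mapping, so we can confirm
# that ID 7 really corresponds to "truck" for the loaded COCO-trained weights.
if model.names.get(TRUCK_CLASS_ID) != "truck":
    print(f"Warning: class {TRUCK_CLASS_ID} is '{model.names.get(TRUCK_CLASS_ID)}', not 'truck'.")
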
def count_trucks(video_path):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    frame_count = 0
    truck_counts = []
    frame_skip = 5  # Process every 5th frame for efficiency

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # End of video

        frame_count += 1
        if frame_count % frame_skip != 0:
            continue  # Skip frames to improve efficiency

        # Run YOLOv12x inference
        results = model(frame, verbose=False)

        truck_count = 0
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls.item())  # Get class ID
                confidence = float(box.conf.item())  # Get confidence score

                # Count only trucks
                if class_id == TRUCK_CLASS_ID and confidence > 0.5:
                    truck_count += 1

        truck_counts.append(truck_count)

    cap.release()
    # Report the peak number of trucks detected in any single sampled frame
    # (per-frame detections, not unique trucks across the whole video)
    return {
        "Trucks in a Frame": int(np.max(truck_counts)) if truck_counts else 0
    }

# Gradio UI function
def analyze_video(video_file):
    result = count_trucks(video_file)
    if isinstance(result, str):  # count_trucks returns an error message on failure
        return result
    return "\n".join([f"{key}: {value}" for key, value in result.items()])

# Define Gradio interface
iface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Truck Count"),
    title="YOLOv12x Truck Counter",
    description="Upload a video to count trucks using YOLOv12x."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
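
# To run this Space locally (a sketch, assuming a standard Gradio setup):
# `python app.py` starts the server and prints a local URL. The Space's
# requirements.txt would typically list ultralytics, gradio, numpy, and
# opencv-python-headless; torch is pulled in as an ultralytics dependency.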