|
import gradio as gr |
|
import cv2 |
|
import numpy as np |
|
import torch |
|
from ultralytics import YOLO |
|
|
|
|
|
# Load the detection model once at module import time (downloads the weights
# on first run if "yolov8x.pt" is not already cached locally).
model = YOLO("yolov8x.pt")




# Class index for "truck" in the COCO label set that the pretrained
# YOLOv8 checkpoints are trained on.
TRUCK_CLASS_ID = 7
|
|
|
def count_trucks(video_path, frame_skip=5, conf_threshold=0.6):
    """Count trucks in a video by running YOLO detection on sampled frames.

    Args:
        video_path: Path to the video file to analyze.
        frame_skip: Process every ``frame_skip``-th frame (default 5) to
            keep inference cost down.
        conf_threshold: Minimum detection confidence (default 0.6) for a
            box to be counted as a truck.

    Returns:
        dict: ``{"Total Trucks in Video": n}`` on success, where ``n`` is
        the maximum number of trucks seen in any single sampled frame
        (without object tracking this is a lower bound on distinct
        trucks, not a true cumulative total). On failure to open the
        video, returns ``{"Error": ...}`` — a dict in both cases so
        callers can always iterate ``.items()``.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Bug fix: the original returned a bare string here, which crashed
        # the caller's ``result.items()``; return a dict consistently.
        return {"Error": "Unable to open video file."}

    frame_count = 0
    truck_count_per_frame = []

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        # Sample frames to reduce the number of model invocations.
        if frame_count % frame_skip != 0:
            continue

        results = model(frame, verbose=False)

        # Count truck detections above the confidence threshold in this frame.
        truck_count = 0
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls.item())
                confidence = float(box.conf.item())
                if class_id == TRUCK_CLASS_ID and confidence > conf_threshold:
                    truck_count += 1

        truck_count_per_frame.append(truck_count)

    cap.release()

    return {
        # Peak per-frame count; 0 when no frames were sampled (empty/short video).
        "Total Trucks in Video": max(truck_count_per_frame) if truck_count_per_frame else 0
    }
|
|
|
|
|
def analyze_video(video_file):
    """Run truck counting on an uploaded video and format the result as text.

    Args:
        video_file: Path to the uploaded video (as provided by gr.Video).

    Returns:
        str: One "key: value" line per result entry, or the error message
        itself when ``count_trucks`` reports a failure.
    """
    result = count_trucks(video_file)
    # Robustness fix: ``count_trucks`` may return a plain error string when
    # the video cannot be opened; calling ``.items()`` on it would raise
    # AttributeError and surface as an opaque Gradio error.
    if isinstance(result, str):
        return result
    return "\n".join(f"{key}: {value}" for key, value in result.items())
|
|
|
|
|
# Build and launch the Gradio UI: a single video input mapped to a text output.
interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Truck Counting Results"),
    # Bug fix: the UI previously advertised "YOLOv12x" while the code
    # actually loads the YOLOv8x checkpoint ("yolov8x.pt").
    title="YOLOv8x-based Truck Counter",
    description="Upload a video to detect and count trucks using YOLOv8x."
)




interface.launch()
|
|