File size: 2,823 Bytes
0a9b429
 
 
 
 
dccecf1
0a9b429
 
4e8a148
0a9b429
 
4e8a148
 
0a9b429
4e8a148
 
 
 
0a9b429
 
 
 
4e8a148
0a9b429
4e5fb8f
0a9b429
4e8a148
0a9b429
 
 
 
 
 
 
 
 
 
 
 
4e8a148
0a9b429
 
 
 
 
4e8a148
 
 
 
 
 
 
 
0a9b429
4e8a148
 
4e5fb8f
 
 
 
 
 
 
 
 
0a9b429
 
 
4e8a148
0a9b429
 
 
4e8a148
0a9b429
 
 
 
 
 
c3cdf4b
4e8a148
 
0a9b429
 
 
 
4e8a148
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import cv2
import numpy as np
import torch
import gradio as gr
from ultralytics import YOLO
from sort import Sort

# Load YOLOv12x model
# NOTE: weights are loaded eagerly at import time; the file must exist next to
# this script (or be resolvable by ultralytics' auto-download) or import fails.
MODEL_PATH = "yolov12x.pt"
model = YOLO(MODEL_PATH)

# COCO dataset class ID for truck
# In the 80-class COCO ordering used by Ultralytics models, index 7 is "truck".
TRUCK_CLASS_ID = 7  # "truck"

# Initialize SORT tracker
# A single module-level tracker: track IDs persist across calls to
# count_unique_trucks, so IDs are unique per process, not per video.
tracker = Sort()

def count_unique_trucks(video_path):
    """Count distinct trucks appearing in a video.

    Runs YOLO detection on every ``frame_skip``-th frame, feeds truck
    detections to the SORT tracker, and counts a track ID as a real truck
    once it has been seen in at least two processed frames (filters out
    one-frame false positives).

    Args:
        video_path: Path to a video file readable by OpenCV.

    Returns:
        dict: ``{"Total Unique Trucks": <int>}`` on success, or a plain
        error string if the video cannot be opened (callers must handle
        both shapes).
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    unique_truck_ids = set()
    frame_skip = 5  # Process every 5th frame for efficiency
    first_seen = {}  # track ID -> frame index of its first sighting

    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # End of video

            frame_count += 1
            if frame_count % frame_skip != 0:
                continue  # Skip frames to improve efficiency

            # Run YOLOv12x inference
            results = model(frame, verbose=False)

            detections = []
            for result in results:
                for box in result.boxes:
                    class_id = int(box.cls.item())  # Get class ID
                    confidence = float(box.conf.item())  # Get confidence score

                    # Keep only confident truck detections
                    if class_id == TRUCK_CLASS_ID and confidence > 0.5:
                        x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding box corners
                        detections.append([x1, y1, x2, y2, confidence])

            # Always call update, even with no detections: SORT needs the
            # empty (0, 5) array to age and expire stale tracks.
            dets = np.array(detections) if detections else np.empty((0, 5))
            tracked_objects = tracker.update(dets)

            for obj in tracked_objects:
                truck_id = int(obj[4])  # Unique ID assigned by SORT

                if truck_id not in first_seen:
                    first_seen[truck_id] = frame_count
                # Consecutive processed frames are exactly frame_skip apart,
                # so ">=" (not ">") is required for a second sighting to count.
                elif frame_count - first_seen[truck_id] >= frame_skip:
                    unique_truck_ids.add(truck_id)
    finally:
        # Release the capture even if inference raises mid-video.
        cap.release()

    return {"Total Unique Trucks": len(unique_truck_ids)}

# Gradio UI function
def analyze_video(video_file):
    """Format the truck-count result of a video for the Gradio textbox.

    Args:
        video_file: Filesystem path supplied by the gr.Video component.

    Returns:
        str: One "key: value" line per result entry, or the error message
        itself when the video could not be opened.
    """
    result = count_unique_trucks(video_file)
    # count_unique_trucks returns a plain string on failure (e.g. unreadable
    # video); passing it through avoids the AttributeError on .items().
    if isinstance(result, str):
        return result
    return "\n".join(f"{key}: {value}" for key, value in result.items())

# Define Gradio interface
# Single-input, single-output app: the uploaded video's temp-file path is
# passed to analyze_video, and its string result fills the textbox.
iface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Analysis Result"),
    title="YOLOv12x Unique Truck Counter",
    description="Upload a video to count unique trucks using YOLOv12x and SORT tracking."
)

# Launch the Gradio app
# Guarded so importing this module (e.g. for testing) does not start a server.
if __name__ == "__main__":
    iface.launch()