import os
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from sort import Sort
import gradio as gr

# Load YOLOv12x model
MODEL_PATH = "yolov12x.pt"
model = YOLO(MODEL_PATH)

# COCO dataset class ID for truck
TRUCK_CLASS_ID = 7  # "truck"
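# (COCO indices used by YOLO models: 2 = car, 5 = bus, 7 = truck)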

# SORT tracker parameters (a fresh tracker is created per video inside count_unique_trucks
# so track IDs do not carry over between uploaded videos)
TRACKER_MAX_AGE = 20        # frames a lost track is kept alive (improves tracking stability)
TRACKER_MIN_HITS = 3        # consecutive detections required before a track is confirmed
TRACKER_IOU_THRESHOLD = 0.3

# Minimum confidence threshold for detection
CONFIDENCE_THRESHOLD = 0.4  # Adjusted to capture more trucks

# Distance threshold to avoid duplicate counts (currently unused; counting relies on the
# entry/exit line logic in count_unique_trucks)
DISTANCE_THRESHOLD = 50

# Dictionary to define keyword-based time intervals
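# e.g. a filename containing "five" is sampled with a 5-second interval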
TIME_INTERVALS = {
    "one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
    "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10, "eleven": 11
}

def determine_time_interval(video_filename):
    """ Determines frame skip interval based on keywords in the filename. """
    for keyword, interval in TIME_INTERVALS.items():
        if keyword in video_filename:
            return interval
    return 5  # Default interval

def count_unique_trucks(video_path):
    """ Counts unique trucks in a video using YOLOv12x and SORT tracking. """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return {"Error": "Unable to open video file."}

    # Fresh SORT tracker per video so track IDs do not leak between runs
    tracker = Sort(max_age=TRACKER_MAX_AGE, min_hits=TRACKER_MIN_HITS, iou_threshold=TRACKER_IOU_THRESHOLD)

    unique_truck_ids = set()  # IDs of trucks that completed an entry-to-exit crossing
    truck_history = {}        # SORT track ID -> first-seen center position and crossing flags

    # Get FPS of the video (fall back to 30 if the metadata is missing)
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Extract filename from the path and convert to lowercase
    video_filename = os.path.basename(video_path).lower()

    # Determine the dynamic time interval based on filename keywords
    time_interval = determine_time_interval(video_filename)

    # Get total frames in the video
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Dynamically adjust frame skipping: process roughly one frame per half time-interval,
    # but sample at least ~10 frames across the whole video
    frame_skip = max(1, min(fps * time_interval // 2, total_frames // 10))
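    # e.g. at 30 FPS with a 5-second interval, frame_skip = min(75, total_frames // 10)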

    frame_count = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # End of video

        frame_count += 1
        if frame_count % frame_skip != 0:
            continue  # Skip frames based on interval

        # Run YOLOv12x inference
        results = model(frame, verbose=False)

        detections = []
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls.item())  # Get class ID
                confidence = float(box.conf.item())  # Get confidence score

                # Track only trucks
                if class_id == TRUCK_CLASS_ID and confidence > CONFIDENCE_THRESHOLD:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])  # Get bounding box
                    detections.append([x1, y1, x2, y2, confidence])

        # Convert detections to numpy array for SORT
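        # SORT expects an (N, 5) array of [x1, y1, x2, y2, score]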
        detections = np.array(detections) if len(detections) > 0 else np.empty((0, 5))

        # Update SORT tracker
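        # update() returns one [x1, y1, x2, y2, track_id] row per active track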
        tracked_objects = tracker.update(detections)

        # Track movement history to avoid duplicate counts
        for obj in tracked_objects:
            truck_id = int(obj[4])  # Unique ID assigned by SORT
            x1, y1, x2, y2 = obj[:4]  # Get bounding box coordinates

            truck_center = (x1 + x2) / 2, (y1 + y2) / 2  # Calculate truck center

            # Entry-exit zone logic (e.g., bottom 20% of the frame)
            frame_height, frame_width = frame.shape[:2]
            entry_line = frame_height * 0.8  # Bottom 20% of the frame
            exit_line = frame_height * 0.2  # Top 20% of the frame
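            # Image y-coordinates grow downward: y > entry_line is the bottom 20% of the
            # frame, y < exit_line is the top 20%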

            if truck_id not in truck_history:
                # New truck detected
                truck_history[truck_id] = {
                    "position": truck_center,
                    "crossed_entry": truck_center[1] > entry_line,
                    "crossed_exit": False
                }
                continue

            # If the truck crosses from entry to exit, count it
            if truck_history[truck_id]["crossed_entry"] and truck_center[1] < exit_line:
                truck_history[truck_id]["crossed_exit"] = True
                unique_truck_ids.add(truck_id)

    cap.release()
    return {"Total Unique Trucks": len(unique_truck_ids)}

# Gradio UI function
def analyze_video(video_file):
    result = count_unique_trucks(video_file)
    return "\n".join([f"{key}: {value}" for key, value in result.items()])

# Define Gradio interface
iface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Analysis Result"),
    title="YOLOv12x Unique Truck Counter",
    description="Upload a video to count unique trucks using YOLOv12x and SORT tracking."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()