import gradio as gr
import cv2
import numpy as np
from ultralytics import YOLO
from sort import Sort  # SORT tracker (e.g., sort.py from the abewley/sort repo; requires filterpy)

# Load YOLOv8 model (pre-trained on COCO); yolov8x is the largest and most
# accurate variant, so swap in yolov8n/s/m if inference speed matters more
model = YOLO("yolov8x.pt")

# Class label for trucks (COCO dataset)
TRUCK_CLASS_ID = 7  # "truck" in COCO dataset
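
# Optional sanity check (assumes the standard 80-class COCO label map that
# ships with the ultralytics weights): fail fast if index 7 is not "truck"
assert model.names[TRUCK_CLASS_ID] == "truck", (
    f"Unexpected class map: {model.names[TRUCK_CLASS_ID]}"
)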

def count_trucks(video_path):
    # Initialize a fresh SORT tracker per video so track IDs (and the
    # unique-truck count) don't carry over between uploads
    tracker = Sort()

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    frame_count = 0
    unique_truck_ids = set()
    frame_skip = 5  # Process every 5th frame for speed (large skips can fragment SORT tracks and inflate the ID count)

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # End of video

        frame_count += 1
        if frame_count % frame_skip != 0:
            continue  # Skip frames for efficiency

        # Run YOLOv8 inference
        results = model(frame, verbose=False)

        detections = []
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls.item())  # Get class ID
                confidence = float(box.conf.item())  # Get confidence score
                x1, y1, x2, y2 = map(int, box.xyxy[0])  # Get bounding box

                if class_id == TRUCK_CLASS_ID and confidence > 0.6:
                    detections.append([x1, y1, x2, y2, confidence])  # Append detection

        # Convert to numpy array for SORT input
        if len(detections) > 0:
            detections = np.array(detections)
        else:
            detections = np.empty((0, 5))  # Empty array when no trucks detected

        # Update tracker
        tracked_objects = tracker.update(detections)
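        # tracker.update() returns one row per live track: [x1, y1, x2, y2, track_id]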

        # Store unique truck IDs
        for obj in tracked_objects:
            truck_id = int(obj[4])  # SORT assigns unique IDs
            unique_truck_ids.add(truck_id)

    cap.release()

    return {
        "Total Unique Trucks in Video": len(unique_truck_ids)
    }

# Gradio UI function
def analyze_video(video_file):
    result = count_trucks(video_file)
    return "\n".join([f"{key}: {value}" for key, value in result.items()])

# Gradio Interface
interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Truck Counting Results"),
    title="YOLOv8-based Truck Counter with Object Tracking",
    description="Upload a video to detect and count unique trucks using YOLOv8 and SORT tracker."
)
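
# Note (assumption, the queue API varies across Gradio versions): long videos
# can exceed default request timeouts, so enabling the request queue is a
# common mitigation:
# interface.queue()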

# Launch app (guarded so the module can be imported without side effects)
if __name__ == "__main__":
    interface.launch()