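# --- Assumed setup (not present in the original file) ----------------------
# count_unique_trucks() below references cv2, np, model, tracker, and three
# constants without defining them. The definitions here are a plausible
# sketch: the weights filename, class ID, and threshold values are
# illustrative assumptions, not taken from the original source.
import cv2
import numpy as np
import gradio as gr
from ultralytics import YOLO
from sort import Sort  # SORT tracker: https://github.com/abewley/sort

model = YOLO("yolo12x.pt")   # assumed: Ultralytics YOLO12 extra-large weights
tracker = Sort()             # assumed: SORT with default tracking parameters
TRUCK_CLASS_ID = 7           # "truck" in the 80-class COCO label set
CONFIDENCE_THRESHOLD = 0.5   # assumed minimum detection confidence
DISTANCE_THRESHOLD = 50      # assumed pixel movement treated as significant
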
def count_unique_trucks(video_path, frame_skip_factor=2):
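    """Count unique trucks in a video with YOLO detections tracked by SORT.

    To keep processing fast, only one frame is analyzed per
    `fps * frame_skip_factor` frames of video; SORT assigns persistent IDs
    to detected trucks, and the distinct IDs are counted.
    """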
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    unique_truck_ids = set()
    truck_history = {}

    # Get FPS of the video; some containers report 0, which would make the
    # modulo below divide by zero, so fall back to a nominal 30 FPS
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    frame_skip = max(1, int(fps * frame_skip_factor))  # Sample one frame per skip window

    frame_count = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # End of video

        frame_count += 1
        if frame_count % frame_skip != 0:
            continue  # Skip frames dynamically

        # Run YOLOv12x inference
        results = model(frame, verbose=False)

        detections = []
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls.item())  # Get class ID
                confidence = float(box.conf.item())  # Get confidence score

                # Track only trucks
                if class_id == TRUCK_CLASS_ID and confidence > CONFIDENCE_THRESHOLD:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])  # Get bounding box
                    detections.append([x1, y1, x2, y2, confidence])

        if len(detections) > 0:
            detections = np.array(detections)
            tracked_objects = tracker.update(detections)
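            # SORT returns one row per live track: [x1, y1, x2, y2, track_id]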

            for obj in tracked_objects:
                truck_id = int(obj[4])  # Unique ID assigned by SORT
                x1, y1, x2, y2 = obj[:4]  # Get the bounding box coordinates

                truck_center = (x1 + x2) / 2, (y1 + y2) / 2  # Calculate the center of the truck

                # If the truck is already in history, check how far it has moved
                if truck_id in truck_history:
                    last_position = truck_history[truck_id]["position"]
                    distance = np.linalg.norm(np.array(truck_center) - np.array(last_position))

                    # Refresh the stored position so movement is measured
                    # between consecutive sightings rather than from the first
                    truck_history[truck_id]["position"] = truck_center

                    if distance > DISTANCE_THRESHOLD:
                        # Re-adding an ID to a set is a no-op, so a truck is
                        # never double-counted; this branch just reaffirms the
                        # ID once significant movement is observed
                        unique_truck_ids.add(truck_id)

                else:
                    # First sighting of this ID: record it and count it
                    truck_history[truck_id] = {
                        "frame_count": frame_count,
                        "position": truck_center
                    }
                    unique_truck_ids.add(truck_id)

    cap.release()

    return {"Total Unique Trucks": len(unique_truck_ids)}

# Gradio UI function
def analyze_video(video_file, frame_skip_factor):
    result = count_unique_trucks(video_file, frame_skip_factor)
    if isinstance(result, str):  # an error message was returned instead of a dict
        return result
    return "\n".join([f"{key}: {value}" for key, value in result.items()])

# Define Gradio interface (gradio is imported at the top of the file)
iface = gr.Interface(
    fn=analyze_video,
    inputs=[
        gr.Video(label="Upload Video"),
        gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Frame Skip Factor"),  # default samples ~1 frame every 2 s of video
    ],
    outputs=gr.Textbox(label="Analysis Result"),
    title="YOLOv12x Unique Truck Counter",
    description="Upload a video to count unique trucks using YOLOv12x and SORT tracking. Adjust the frame skip factor to control processing speed."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()