# app.py — YOLOv12x + SORT unique-truck counter (Hugging Face Space).
# (Removed web-page scrape residue — "picture / raw / history blame / 4.73 kB"
# header lines were not part of the program and made the file invalid Python.)
import os
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from sort import Sort
import gradio as gr
# YOLOv12x weights, loaded once at import time (downloaded/cached by ultralytics).
MODEL_PATH = "yolov12x.pt"
model = YOLO(MODEL_PATH)
# COCO class index for "truck".
TRUCK_CLASS_ID = 7 # "truck"
# SORT tracker: tolerate up to 20 missed frames per track, require 3 hits
# before a track is confirmed, associate boxes at IoU >= 0.3.
# NOTE(review): this instance is module-global, so track state and IDs persist
# across successive videos analyzed by the app — confirm that is intended.
tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3) # Improved tracking stability
# Detections below this confidence are discarded.
CONFIDENCE_THRESHOLD = 0.4 # Adjusted to capture more trucks
# NOTE(review): DISTANCE_THRESHOLD is not referenced anywhere in this file —
# dead constant, or used by code outside this view?
DISTANCE_THRESHOLD = 50
# Number-word -> interval (seconds) lookup used to pick a frame-sampling rate
# from keywords embedded in the uploaded file's name (e.g. "truck_five.mp4" -> 5).
TIME_INTERVALS = {
    "one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
    "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10, "eleven": 11
}


def determine_time_interval(video_filename, default=5):
    """Return the sampling interval (in seconds) encoded in a filename.

    Scans *video_filename* for the first number-word from TIME_INTERVALS
    (plain substring match, so the caller should pass a lowercased name)
    and returns its mapped value.

    Args:
        video_filename: File name to scan (expected lowercase).
        default: Interval returned when no keyword matches. Previously a
            hard-coded ``5``; exposed as a parameter so callers can tune
            the fallback without changing behavior for existing calls.

    Returns:
        int: The matched interval, or *default* when nothing matches.
    """
    for keyword, interval in TIME_INTERVALS.items():
        if keyword in video_filename:
            return interval
    return default
def count_unique_trucks(video_path):
    """Count unique trucks crossing the frame in a video.

    Runs YOLOv12x detection on a subsampled set of frames, associates
    detections across frames with SORT, and counts a truck once its track
    has been seen in the entry zone (bottom 20% of the frame) and later
    reaches the exit zone (top 20%).

    Args:
        video_path: Path to the video file to analyze.

    Returns:
        dict: ``{"Total Unique Trucks": <int>}`` on success, or
        ``{"Error": <message>}`` if the video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return {"Error": "Unable to open video file."}

    # Fresh tracker per call: the previous version reused the module-global
    # SORT instance, leaking track state and IDs from earlier videos into
    # every subsequent analysis.
    local_tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

    unique_truck_ids = set()
    truck_history = {}  # track_id -> {"position", "crossed_entry", "crossed_exit"}

    try:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Sampling interval is encoded as a number-word in the filename.
        video_filename = os.path.basename(video_path).lower()
        time_interval = determine_time_interval(video_filename)

        # Sample roughly every half-interval, but cap the skip so at least
        # ~10 frames of the clip are examined.
        frame_skip = max(1, min(fps * time_interval // 2, total_frames // 10))

        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of video

            frame_count += 1
            if frame_count % frame_skip != 0:
                continue  # subsample frames

            # Entry/exit zones, hoisted out of the per-object loop
            # (the original recomputed them for every tracked box).
            frame_height = frame.shape[0]
            entry_line = frame_height * 0.8  # bottom 20% of the frame
            exit_line = frame_height * 0.2   # top 20% of the frame

            # YOLOv12x inference; keep only confident truck boxes.
            detections = []
            for result in model(frame, verbose=False):
                for box in result.boxes:
                    class_id = int(box.cls.item())
                    confidence = float(box.conf.item())
                    if class_id == TRUCK_CLASS_ID and confidence > CONFIDENCE_THRESHOLD:
                        x1, y1, x2, y2 = map(int, box.xyxy[0])
                        detections.append([x1, y1, x2, y2, confidence])

            # SORT expects an (N, 5) array even when there are no detections.
            detections = np.array(detections) if detections else np.empty((0, 5))
            tracked_objects = local_tracker.update(detections)

            for obj in tracked_objects:
                truck_id = int(obj[4])  # unique ID assigned by SORT
                x1, y1, x2, y2 = obj[:4]
                truck_center = ((x1 + x2) / 2, (y1 + y2) / 2)

                state = truck_history.get(truck_id)
                if state is None:
                    # First sighting of this track.
                    truck_history[truck_id] = {
                        "position": truck_center,
                        "crossed_entry": truck_center[1] > entry_line,
                        "crossed_exit": False,
                    }
                    continue

                # Bug fix: the original never updated a track after its first
                # sighting, so a truck that entered the bottom zone later was
                # never eligible to be counted. Keep the latest position and
                # latch the entry crossing on any frame.
                state["position"] = truck_center
                if truck_center[1] > entry_line:
                    state["crossed_entry"] = True

                # Count the truck once it completes entry -> exit
                # (unique_truck_ids is a set, so re-adding is harmless).
                if state["crossed_entry"] and truck_center[1] < exit_line:
                    state["crossed_exit"] = True
                    unique_truck_ids.add(truck_id)
    finally:
        # Release the capture even if decoding/inference raises.
        cap.release()

    return {"Total Unique Trucks": len(unique_truck_ids)}
# Gradio UI function
def analyze_video(video_file):
    """Run the truck counter on an uploaded video and format the result.

    Args:
        video_file: Path to the video supplied by the Gradio widget.

    Returns:
        str: One "key: value" line per entry of the analysis result.
    """
    stats = count_unique_trucks(video_file)
    return "\n".join(f"{label}: {amount}" for label, amount in stats.items())
# Gradio interface: single video upload in, plain-text summary out.
iface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Analysis Result"),
    title="YOLOv12x Unique Truck Counter",
    description="Upload a video to count unique trucks using YOLOv12x and SORT tracking."
)
# Start the web server only when run as a script (Spaces executes this file
# directly; importing the module elsewhere must not launch the app).
if __name__ == "__main__":
    iface.launch()