import streamlit as st
import cv2
import numpy as np
from ultralytics import YOLO
import pytesseract
from PIL import Image
import os

# Set page title and icon
st.set_page_config(page_title="Motorcycle Helmet Detection", layout="wide")

# Set Tesseract path
pytesseract.pytesseract.tesseract_cmd = '/opt/homebrew/bin/tesseract'
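# Note: the Homebrew path above is macOS (Apple Silicon) specific. On Linux,
# Tesseract is usually at /usr/bin/tesseract. A portable alternative (an
# assumption, not part of the original script) is to resolve the binary at
# runtime:
#   import shutil
#   tess = shutil.which("tesseract")
#   if tess:
#       pytesseract.pytesseract.tesseract_cmd = tess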
# Load models
@st.cache_resource  # cache so the models load once per session rather than on every rerun
def load_models():
    # Load YOLO model
    detection_model = YOLO("yolov8n.pt")
    # Load face detection model
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    return detection_model, face_cascade

# Load the models
try:
    detection_model, face_cascade = load_models()
    st.sidebar.success("✅ Models loaded successfully")
except Exception as e:
    st.error(f"Error loading models: {str(e)}")
    st.stop()
def extract_license_plate(image, roi=None):
    """Extract license plate from image or region of interest"""
    if roi is not None:
        # If a region is specified, use it
        target = roi
    else:
        # Otherwise use the whole image
        target = image

    plate_text = None
    try:
        # Convert to grayscale
        gray = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
        gray = cv2.bilateralFilter(gray, 11, 17, 17)
        edged = cv2.Canny(gray, 30, 200)

        # Find contours
        contours, _ = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

        for contour in contours:
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)

            # License plates are typically rectangular (4 points)
            if len(approx) == 4:
                x, y, w, h = cv2.boundingRect(approx)
                # Filter by aspect ratio - license plates are typically wider than tall
                aspect_ratio = float(w) / h
                if 1.5 < aspect_ratio < 5.0:
                    plate_roi = gray[y:y+h, x:x+w]
                    if plate_roi.size > 0:
                        # Resize for better OCR
                        plate_roi = cv2.resize(plate_roi, None, fx=2, fy=2)
                        # Apply threshold to improve text extraction
                        _, plate_roi = cv2.threshold(plate_roi, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                        # Extract text
                        plate_text = pytesseract.image_to_string(
                            plate_roi,
                            config='--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
                        )
                        plate_text = ''.join(e for e in plate_text if e.isalnum())
                        if len(plate_text) > 3:  # Basic validation
                            return plate_text
    except Exception as e:
        print(f"Error extracting license plate: {e}")

    return plate_text
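# Usage note (illustrative): extract_license_plate(frame) scans the whole frame,
# while extract_license_plate(frame, frame[y1:y2, x1:x2]) restricts the contour
# search and OCR to a cropped region; process_image() below uses the second form
# on each detected motorcycle box before falling back to the full image.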
def detect_helmet_with_circle_detection(image, head_region):
    """Detect helmet using circle detection"""
    try:
        # Extract head region
        x, y, w, h = head_region
        head = image[y:y+h, x:x+w]

        # Convert to grayscale
        gray = cv2.cvtColor(head, cv2.COLOR_BGR2GRAY)

        # Apply GaussianBlur to reduce noise
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        # Detect circles
        circles = cv2.HoughCircles(
            gray, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
            param1=50, param2=30, minRadius=int(w*0.2), maxRadius=int(w*0.6)
        )

        # If circles detected, likely a helmet
        return circles is not None and len(circles[0]) > 0
    except Exception:
        return False
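# Parameter notes for the cv2.HoughCircles call above: dp=1 keeps the Hough
# accumulator at full image resolution, minDist=20 is the minimum spacing between
# detected circle centres, param1 is the upper Canny edge threshold, and param2 is
# the accumulator threshold (lower values accept more, possibly spurious, circles).
# The radius bounds are tied to the head-box width so only roughly head-sized
# circles are counted as a helmet.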
def detect_helmet_with_color(image, head_region):
    """Detect helmet using color analysis"""
    try:
        # Extract head region
        x, y, w, h = head_region
        head = image[y:y+h, x:x+w]

        # Convert to HSV
        hsv = cv2.cvtColor(head, cv2.COLOR_BGR2HSV)

        # Define color ranges for common helmet colors (black, white, red, blue)
        # Black
        lower_black = np.array([0, 0, 0])
        upper_black = np.array([180, 255, 50])
        black_mask = cv2.inRange(hsv, lower_black, upper_black)

        # White
        lower_white = np.array([0, 0, 200])
        upper_white = np.array([180, 30, 255])
        white_mask = cv2.inRange(hsv, lower_white, upper_white)

        # Red (two ranges, since the red hue wraps around 0/180 in OpenCV HSV)
        lower_red1 = np.array([0, 100, 100])
        upper_red1 = np.array([10, 255, 255])
        red_mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
        lower_red2 = np.array([160, 100, 100])
        upper_red2 = np.array([180, 255, 255])
        red_mask2 = cv2.inRange(hsv, lower_red2, upper_red2)

        # Blue
        lower_blue = np.array([100, 100, 100])
        upper_blue = np.array([140, 255, 255])
        blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)

        # Combine masks (bitwise OR avoids uint8 wrap-around from summing 255-valued masks)
        combined_mask = black_mask | white_mask | red_mask1 | red_mask2 | blue_mask

        # Calculate the fraction of head-region pixels with helmet-like colors
        # (use the mask size rather than w*h in case the crop was clipped at the image border)
        helmet_color_percentage = np.sum(combined_mask > 0) / combined_mask.size

        # If more than 30% of the head region has helmet-like colors, likely a helmet
        return helmet_color_percentage > 0.3
    except Exception:
        return False
def process_image(image):
    if isinstance(image, Image.Image):
        # Convert PIL image to OpenCV format
        image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Make a copy for annotation
    annotated_img = image.copy()

    # Run YOLO detection for motorcycle and person
    results = detection_model(image)[0]

    # Initialize variables
    helmet_detected = False
    person_detected = False
    motorcycle_detected = False
    plate_text = None

    # Variables to store bounding boxes
    person_boxes = []
    motorcycle_boxes = []

    # Check detections
    for box in results.boxes:
        cls = int(box.cls[0])
        conf = float(box.conf[0])

        if conf > 0.3:  # Confidence threshold
            x1, y1, x2, y2 = map(int, box.xyxy[0])

            # Standard YOLOv8 classes
            if cls == 0:  # Person
                person_detected = True
                person_boxes.append((x1, y1, x2, y2))

                # Get head region (top 30% of person bounding box)
                head_x = x1
                head_y = y1
                head_w = x2 - x1
                head_h = int((y2 - y1) * 0.3)
                head_region = (head_x, head_y, head_w, head_h)

                # Draw head region for debugging
                cv2.rectangle(annotated_img, (head_x, head_y), (head_x + head_w, head_y + head_h), (0, 255, 255), 2)

                # Extract head image
                head_img = image[head_y:head_y+head_h, head_x:head_x+head_w]

                # Use multiple methods to detect helmet
                if head_img.size > 0:
                    # Method 1: Check for faces - a visible face suggests no helmet
                    gray = cv2.cvtColor(head_img, cv2.COLOR_BGR2GRAY)
                    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

                    if len(faces) > 0:
                        # Face detected, likely no helmet
                        helmet_detected = False
                        cv2.putText(annotated_img, "Face Detected", (head_x, head_y - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    else:
                        # No face detected, check other methods
                        # Method 2: Circle detection
                        helmet_detected = detect_helmet_with_circle_detection(image, head_region)

                        # Method 3: Color analysis
                        if not helmet_detected:
                            helmet_detected = detect_helmet_with_color(image, head_region)

            elif cls == 3:  # Motorcycle
                motorcycle_detected = True
                motorcycle_boxes.append((x1, y1, x2, y2))

                # Extract license plate from motorcycle region
                roi = image[y1:y2, x1:x2]
                if roi.size > 0:
                    moto_plate = extract_license_plate(image, roi)
                    if moto_plate:
                        plate_text = moto_plate

    # If a person on a motorcycle is detected but no license plate found yet, try the full image
    if person_detected and motorcycle_detected and not plate_text:
        plate_text = extract_license_plate(image)

    # Add manual override option via sidebar
    st.sidebar.markdown("### Detection Override")
    if st.sidebar.checkbox("Override automatic detection"):
        helmet_detected = st.sidebar.radio("Helmet Status:", [True, False], index=0 if helmet_detected else 1)

    return helmet_detected, plate_text, cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB)
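# process_image() returns (helmet_detected, plate_text, annotated RGB image); the
# BGR-to-RGB conversion on return is what lets st.image() below render the
# annotations with correct colors.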
# Streamlit UI
st.title("🏍️ Motorcycle Helmet Detection System")
st.write("Upload an image to detect helmet usage and license plate")

uploaded_file = st.file_uploader("Choose an image", type=['jpg', 'jpeg', 'png'])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    col1, col2 = st.columns(2)

    with col1:
        st.image(image, caption="Uploaded Image", use_column_width=True)

    with st.spinner('Processing image...'):
        helmet_detected, plate_text, processed_image = process_image(image)

    with col2:
        st.image(processed_image, caption="Processed Image", use_column_width=True)
    # Display results with custom styling
    if helmet_detected:
        st.markdown("""
            <div style='padding: 20px; background-color: #d4edda; border-radius: 5px; margin: 10px 0;'>
                <h3 style='color: #155724; margin: 0;'>✅ Helmet Detected!</h3>
            </div>
        """, unsafe_allow_html=True)
    else:
        st.markdown("""
            <div style='padding: 20px; background-color: #f8d7da; border-radius: 5px; margin: 10px 0;'>
                <h3 style='color: #721c24; margin: 0;'>❌ No Helmet Detected - Violation!</h3>
            </div>
        """, unsafe_allow_html=True)

    # Always show the license plate if one was detected: emphasized in red when a
    # violation occurred, in a neutral box when the rider is compliant
    if plate_text:
        if not helmet_detected:
            st.markdown(f"""
                <div style='padding: 20px; background-color: #f8d7da; border-radius: 5px; margin: 10px 0;'>
                    <h3 style='color: #721c24; margin: 0;'>License Plate Number:</h3>
                    <p style='font-size: 24px; margin: 10px 0 0 0;'>{plate_text}</p>
                </div>
            """, unsafe_allow_html=True)
        else:
            st.markdown(f"""
                <div style='padding: 20px; background-color: #e2e3e5; border-radius: 5px; margin: 10px 0;'>
                    <h3 style='color: #383d41; margin: 0;'>License Plate Number:</h3>
                    <p style='font-size: 24px; margin: 10px 0 0 0;'>{plate_text}</p>
                </div>
            """, unsafe_allow_html=True)