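"""SecurityDemo app.py — Gradio demo running YOLOv8 video analytics:
crowd detection, people tracking, fall detection, and fight detection."""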
import os

import cv2
import gradio as gr
import numpy as np
import spaces
import torch
from ultralytics import YOLO
class CrowdDetection:
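    """Count people per frame with YOLOv8 and overlay a crowd alert above a threshold."""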
def __init__(self, model_path="yolov8n.pt"):
"""Initialize the YOLO model once."""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(model_path):
self.model = YOLO("yolov8n.pt") # Downloads if not present
self.model.save(model_path)
else:
self.model = YOLO(model_path)
self.model.to(self.device)
@spaces.GPU
def detect_crowd(self, video_path):
"""Process video for crowd detection."""
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise ValueError(f"❌ Failed to open video: {video_path}")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
output_path = "output_crowd.mp4"
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
if not out.isOpened():
cap.release()
raise ValueError(f"❌ Failed to initialize video writer")
        CROWD_THRESHOLD = 10  # Alert when more than this many people appear in a frame
frame_count = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame_count += 1
results = self.model(frame)
            # COCO class 0 is "person": count person detections in this frame
            person_count = sum(1 for result in results for cls in result.boxes.cls.cpu().numpy() if int(cls) == 0)
for result in results:
boxes = result.boxes.xyxy.cpu().numpy()
classes = result.boxes.cls.cpu().numpy()
for box, cls in zip(boxes, classes):
if int(cls) == 0: # Person class
x1, y1, x2, y2 = map(int, box)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(frame, "Person", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
alert_text = "Crowd Alert!" if person_count > CROWD_THRESHOLD else f"People: {person_count}"
cv2.putText(frame, alert_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 0, 255) if person_count > CROWD_THRESHOLD else (0, 255, 0), 2)
out.write(frame)
cap.release()
out.release()
        if frame_count == 0 or not os.path.exists(output_path):
            raise ValueError("❌ Processing failed: no frames were read from the input video")
return output_path
class PeopleTracking:
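    """Track people across frames using YOLOv8's built-in tracker and draw per-ID boxes."""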
def __init__(self, yolo_model_path="yolov8n.pt"):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(yolo_model_path):
self.model = YOLO("yolov8n.pt")
self.model.save(yolo_model_path)
else:
self.model = YOLO(yolo_model_path)
self.model.to(self.device)
@spaces.GPU
def track_people(self, video_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise ValueError(f"❌ Failed to open video: {video_path}")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
output_path = "output_tracking.mp4"
out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
if not out.isOpened():
cap.release()
raise ValueError(f"❌ Failed to initialize video writer")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
            # persist=True keeps the tracker state between frames so IDs remain stable
            results = self.model.track(frame, persist=True)
for result in results:
boxes = result.boxes.xyxy.cpu().numpy()
classes = result.boxes.cls.cpu().numpy()
                # Fall back to positional indices when the tracker returns no IDs
                ids = result.boxes.id.cpu().numpy() if result.boxes.id is not None else np.arange(len(boxes))
for box, cls, obj_id in zip(boxes, classes, ids):
if int(cls) == 0:
x1, y1, x2, y2 = map(int, box)
cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.putText(frame, f"ID {int(obj_id)}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
out.write(frame)
cap.release()
out.release()
if not os.path.exists(output_path):
raise ValueError("❌ Processing failed")
return output_path
class FallDetection:
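    """Flag a detected person as fallen when their bounding box is wide relative to its height."""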
def __init__(self, yolo_model_path="yolov8l.pt"):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(yolo_model_path):
self.model = YOLO("yolov8l.pt")
self.model.save(yolo_model_path)
else:
self.model = YOLO(yolo_model_path)
self.model.to(self.device)
@spaces.GPU
def detect_fall(self, video_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise ValueError(f"❌ Failed to open video: {video_path}")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
output_path = "output_fall.mp4"
out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
if not out.isOpened():
cap.release()
raise ValueError(f"❌ Failed to initialize video writer")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
results = self.model(frame)
for result in results:
boxes = result.boxes.xyxy.cpu().numpy()
classes = result.boxes.cls.cpu().numpy()
for box, cls in zip(boxes, classes):
if int(cls) == 0:
x1, y1, x2, y2 = map(int, box)
                        box_width = x2 - x1
                        box_height = y2 - y1
                        # A box noticeably wider than it is tall suggests the person is horizontal
                        aspect_ratio = box_width / box_height if box_height > 0 else float('inf')
                        if aspect_ratio > 0.55:  # Treated as lying down (fallen)
color = (0, 0, 255)
label = "FALL DETECTED"
else:
color = (0, 255, 0)
label = "Standing"
cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
out.write(frame)
cap.release()
out.release()
if not os.path.exists(output_path):
raise ValueError("❌ Processing failed")
return output_path
class FightDetection:
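    """Heuristic fight detection from YOLOv8 pose keypoints (raised-arm check)."""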
def __init__(self, yolo_model_path="yolov8n-pose.pt"):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(yolo_model_path):
self.model = YOLO("yolov8n-pose.pt")
self.model.save(yolo_model_path)
else:
self.model = YOLO(yolo_model_path)
self.model.to(self.device)
@spaces.GPU
def detect_fight(self, video_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise ValueError(f"❌ Failed to open video: {video_path}")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
output_path = "output_fight.mp4"
out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
if not out.isOpened():
cap.release()
raise ValueError(f"❌ Failed to initialize video writer")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
results = self.model.track(frame, persist=True)
fight_detected = False
person_count = 0
for result in results:
                keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints is not None else []
                boxes = result.boxes.xyxy.cpu().numpy() if result.boxes is not None else []
                classes = result.boxes.cls.cpu().numpy() if result.boxes is not None else []
for box, kp, cls in zip(boxes, keypoints, classes):
if int(cls) == 0:
person_count += 1
x1, y1, x2, y2 = map(int, box)
                        # Crude heuristic: flag a "fight" pose when the left shoulder (keypoint 5)
                        # or left elbow (keypoint 7) sits in the top 30% of the person's box
                        if len(kp) > 7 and (kp[5][1] < y1 + (y2 - y1) * 0.3 or kp[7][1] < y1 + (y2 - y1) * 0.3):
fight_detected = True
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255) if fight_detected else (0, 255, 0), 2)
label = "FIGHT DETECTED" if fight_detected else "Person"
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 0, 255) if fight_detected else (0, 255, 0), 2)
if fight_detected and person_count > 1:
cv2.putText(frame, "FIGHT ALERT!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
out.write(frame)
cap.release()
out.release()
if not os.path.exists(output_path):
raise ValueError("❌ Processing failed")
return output_path
# Unified processing function
def process_video(feature, video):
    # Map each UI feature name to its detector class and processing method;
    # deriving the method name from the label (e.g. "crowd_detection") would not match
    # the actual method names (detect_crowd, track_people, ...).
    detectors = {
        "Crowd Detection": (CrowdDetection, "detect_crowd"),
        "People Tracking": (PeopleTracking, "track_people"),
        "Fall Detection": (FallDetection, "detect_fall"),
        "Fight Detection": (FightDetection, "detect_fight"),
    }
    try:
        detector_cls, method_name = detectors[feature]
        detector = detector_cls()
        return getattr(detector, method_name)(video)
    except Exception as e:
        raise ValueError(f"Error processing video: {e}")
# Gradio Interface
interface = gr.Interface(
fn=process_video,
inputs=[
gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection"], label="Select Feature"),
gr.Video(label="Upload Video")
],
outputs=gr.Video(label="Processed Video"),
title="YOLOv8 Multitask Video Processing",
description="Select a feature to process your video: Crowd Detection, People Tracking, Fall Detection, or Fight Detection."
)
if __name__ == "__main__":
interface.launch(debug=True)