# SecurityDemo / app.py
import gradio as gr
import torch
import cv2
import numpy as np
from ultralytics import YOLO
import spaces

# Define People Tracking
class PeopleTracking:
    def __init__(self, yolo_model_path="yolov8n.pt"):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = YOLO(yolo_model_path).to(self.device)

    def track_people(self, video_path):
        cap = cv2.VideoCapture(video_path)
        output_path = "output_tracking.mp4"
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back when FPS metadata is missing
        frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter(output_path, fourcc, fps, frame_size)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = self.model.track(frame, persist=True)
            for result in results:
                boxes = result.boxes.xyxy.cpu().numpy()
                classes = result.boxes.cls.cpu().numpy()
                # result.boxes.id always exists but is None until the tracker
                # assigns IDs, so test for None rather than using hasattr().
                if result.boxes.id is not None:
                    ids = result.boxes.id.cpu().numpy()
                else:
                    ids = np.arange(len(boxes))
                for box, cls, obj_id in zip(boxes, classes, ids):
                    if int(cls) == 0:  # COCO class 0 = person
                        x1, y1, x2, y2 = map(int, box)
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
                        cv2.putText(frame, f"ID {int(obj_id)}", (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
            out.write(frame)

        cap.release()
        out.release()
        return output_path
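
# A minimal usage sketch, assuming a local input file (the path below is
# hypothetical and not part of the app's Gradio flow): calling the tracker
# directly returns the path of the annotated output video.
def _demo_track_people(video_path="samples/lobby.mp4"):
    tracker = PeopleTracking()
    return tracker.track_people(video_path)  # -> "output_tracking.mp4"
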
# Define Fall Detection
class FallDetection:
    def __init__(self, yolo_model_path="yolov8l.pt"):
        self.model = YOLO(yolo_model_path)

    def detect_fall(self, video_path):
        cap = cv2.VideoCapture(video_path)
        output_path = "output_fall.mp4"
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
        frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter(output_path, fourcc, fps, frame_size)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = self.model(frame)
            for result in results:
                boxes = result.boxes.xyxy.cpu().numpy()
                classes = result.boxes.cls.cpu().numpy()
                for box, cls in zip(boxes, classes):
                    if int(cls) == 0:
                        x1, y1, x2, y2 = map(int, box)
                        box_w = x2 - x1
                        box_h = max(y2 - y1, 1)  # guard against zero-height boxes
                        # Heuristic: a person lying down produces a box that is
                        # wide relative to its height.
                        aspect_ratio = box_w / box_h
                        if aspect_ratio > 0.55:
                            color, label = (0, 0, 255), "FALL DETECTED"
                        else:
                            color, label = (0, 255, 0), "Standing"
                        cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                        cv2.putText(frame, label, (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            out.write(frame)

        cap.release()
        out.release()
        return output_path
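
# The fall check above is just a width/height ratio test. A restatement as a
# pure function (same 0.55 threshold; the helper name is ours, not part of
# the original app) makes the heuristic easy to unit-test in isolation:
def _looks_fallen(box, threshold=0.55):
    """Return True if a person box (x1, y1, x2, y2) is wide relative to its height."""
    x1, y1, x2, y2 = box
    return (x2 - x1) / max(y2 - y1, 1) > threshold

# e.g. _looks_fallen((0, 0, 200, 100)) -> True; _looks_fallen((0, 0, 50, 180)) -> False
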
# Define Fight Detection
class FightDetection:
    def __init__(self, yolo_model_path="yolov8n-pose.pt"):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = YOLO(yolo_model_path).to(self.device)

    def detect_fight(self, video_path):
        cap = cv2.VideoCapture(video_path)
        output_path = "output_fight.mp4"
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
        frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter(output_path, fourcc, fps, frame_size)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = self.model.track(frame, persist=True)
            for result in results:
                keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints is not None else []
                classes = result.boxes.cls.cpu().numpy() if result.boxes is not None else []
                for kp, cls in zip(keypoints, classes):
                    if int(cls) == 0:
                        # Draws a box spanning the first and last pose keypoints
                        # and labels every detected person, so this is a placeholder
                        # visualization rather than an actual fight classifier.
                        x1, y1 = int(kp[0][0]), int(kp[0][1])
                        x2, y2 = int(kp[-1][0]), int(kp[-1][1])
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        cv2.putText(frame, "FIGHT DETECTED", (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            out.write(frame)

        cap.release()
        out.release()
        return output_path
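
# A tighter person box could be derived from the min/max over all pose
# keypoints instead of the first and last ones. This helper is a sketch of
# that idea (our assumption, not the original app's method); `kp` is the
# (N, 2) array of (x, y) coordinates yielded by result.keypoints.xy.
def _keypoint_bbox(kp):
    xs, ys = kp[:, 0], kp[:, 1]
    return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())
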
# Process video based on the selected feature
@spaces.GPU  # request a GPU for this handler on Hugging Face Spaces (ZeroGPU)
def process_video(feature, video):
    # Map each feature to its class and entry-point method explicitly;
    # deriving the method name from the label would not match "track_people".
    detectors = {
        "People Tracking": (PeopleTracking, "track_people"),
        "Fall Detection": (FallDetection, "detect_fall"),
        "Fight Detection": (FightDetection, "detect_fight"),
    }
    detector_cls, method_name = detectors[feature]
    return getattr(detector_cls(), method_name)(video)
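
# Example invocation outside Gradio (hypothetical input path):
#   process_video("Fall Detection", "samples/clip.mp4")  # -> "output_fall.mp4"
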
# Gradio interface
interface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Dropdown(choices=["People Tracking", "Fall Detection", "Fight Detection"], label="Select Feature"),
        gr.Video(label="Upload Video"),
    ],
    outputs=gr.Video(label="Processed Video"),
    title="YOLOv8 Multitask Video Processing",
)

if __name__ == "__main__":
    interface.launch()