Update app.py
app.py CHANGED
@@ -256,13 +256,77 @@ class FightDetection:
         except Exception as e:
             raise ValueError(f"Error in fight_detection: {str(e)}")
 
+class IntrusionDetection:
+    def __init__(self, model_path="yolov8n.pt", max_intrusion_time=300, iou_threshold=0.5, conf_threshold=0.5):
+        self.model_path = model_path
+        self.max_intrusion_time = max_intrusion_time
+        self.iou_threshold = iou_threshold
+        self.conf_threshold = conf_threshold
+
+    def detect_intrusion(self, video_path):
+        try:
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            if not os.path.exists(self.model_path):
+                model = YOLO("yolov8n.pt")
+                model.save(self.model_path)
+            else:
+                model = YOLO(self.model_path)
+            model.to(device)
+
+            cap = cv2.VideoCapture(video_path)
+            if not cap.isOpened():
+                raise ValueError(f"❌ Failed to open video: {video_path}")
+
+            fps = int(cap.get(cv2.CAP_PROP_FPS))
+            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+            output_path = "output_intrusion.mp4"
+            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+            if not out.isOpened():
+                cap.release()
+                raise ValueError(f"❌ Failed to initialize video writer")
+
+            frame_count = 0
+
+            while cap.isOpened():
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                frame_count += 1
+
+                results = model(frame)
+                for result in results:
+                    boxes = result.boxes.xyxy.cpu().numpy()
+                    classes = result.boxes.cls.cpu().numpy()
+                    confidences = result.boxes.conf.cpu().numpy()
+                    for box, cls, conf in zip(boxes, classes, confidences):
+                        if int(cls) == 0 and conf > self.conf_threshold:  # Person class with confidence filter
+                            x1, y1, x2, y2 = map(int, box)
+                            label = "Intruder"
+                            color = (0, 0, 255)
+                            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+                            cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+                out.write(frame)
+
+            cap.release()
+            out.release()
+            if frame_count == 0 or not os.path.exists(output_path):
+                raise ValueError("❌ Processing failed: No frames processed or output not created")
+            return output_path
+        except Exception as e:
+            raise ValueError(f"Error in detect_intrusion: {str(e)}")
+
 # Unified processing function with status output
 def process_video(feature, video):
     detectors = {
         "Crowd Detection": CrowdDetection,
         "People Tracking": PeopleTracking,
         "Fall Detection": FallDetection,
-        "Fight Detection": FightDetection
+        "Fight Detection": FightDetection,
+        "Intrusion Detection" : IntrusionDetection
     }
     try:
         detector = detectors[feature]()
@@ -276,7 +340,7 @@ def process_video(feature, video):
 interface = gr.Interface(
     fn=process_video,
     inputs=[
-        gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection"], label="Select Feature"),
+        gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection", "Intrusion Detection"], label="Select Feature"),
         gr.Video(label="Upload Video")
     ],
     outputs=[
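For orientation, a minimal sketch (not part of the commit) of how the new class can be exercised once this change is applied. The import of app.py as a module named app, the sample.mp4 input path, and the assumption that process_video ultimately calls detect_intrusion are illustrative; the hunks only show the dispatch table, not the call itself.

# Sketch, assuming app.py is importable as `app` and a local sample.mp4 exists.
from app import IntrusionDetection, process_video

# Call the new detector directly; detect_intrusion writes an annotated copy of the
# input video and returns its path ("output_intrusion.mp4"), raising ValueError on failure.
detector = IntrusionDetection(model_path="yolov8n.pt", conf_threshold=0.5)
output_file = detector.detect_intrusion("sample.mp4")
print(output_file)

# The same video routed through the unified entry point behind the Gradio dropdown.
process_video("Intrusion Detection", "sample.mp4")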