Update app.py
app.py CHANGED
@@ -324,6 +324,95 @@ class IntrusionDetection:
         except Exception as e:
             raise ValueError(f"Error in detect_intrusion: {str(e)}")
 
+class IntrusionDetectionEn:
+    def __init__(self, model_path="yolov8n.pt", max_intrusion_time=300, iou_threshold=0.5, conf_threshold=0.7):
+        self.model_path = model_path
+        self.max_intrusion_time = max_intrusion_time
+        self.iou_threshold = iou_threshold
+        self.conf_threshold = conf_threshold
+
+        # Predefined staff uniform colors (RGB format)
+        self.staff_colors = [
+            (139, 143, 133),  # Grayish tone
+            (146, 150, 140),  # Light grayish tone
+            (146, 152, 141),  # Muted gray-green
+            (143, 147, 136),  # Gray-green
+            (48, 59, 71)      # Dark blue/gray
+        ]
+
+    def is_staff(self, person_crop):
+        """Checks if the detected person is a staff member based on clothing color."""
+        avg_color = np.mean(person_crop, axis=(0, 1))  # Compute average color (BGR)
+        avg_color = avg_color[::-1]  # Convert BGR to RGB
+
+        # Compute Euclidean distance to known staff colors
+        for color in self.staff_colors:
+            dist = np.linalg.norm(np.array(avg_color) - np.array(color))
+            if dist < 30:  # Threshold to consider it a match
+                return True
+        return False
+
+    def intrusion_detect_en(self, video_path):
+        try:
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            if not os.path.exists(self.model_path):
+                model = YOLO("yolov8n.pt")
+                model.save(self.model_path)
+            else:
+                model = YOLO(self.model_path)
+            model.to(device)
+
+            cap = cv2.VideoCapture(video_path)
+            if not cap.isOpened():
+                raise ValueError(f"❌ Failed to open video: {video_path}")
+
+            fps = int(cap.get(cv2.CAP_PROP_FPS))
+            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+            output_path = "output_intrusion.mp4"
+            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+            if not out.isOpened():
+                cap.release()
+                raise ValueError(f"❌ Failed to initialize video writer")
+
+            frame_count = 0
+
+            while cap.isOpened():
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                frame_count += 1
+
+                results = model(frame)
+                for result in results:
+                    boxes = result.boxes.xyxy.cpu().numpy()
+                    classes = result.boxes.cls.cpu().numpy()
+                    confidences = result.boxes.conf.cpu().numpy()
+
+                    for box, cls, conf in zip(boxes, classes, confidences):
+                        if int(cls) == 0 and conf > self.conf_threshold:  # Person class
+                            x1, y1, x2, y2 = map(int, box)
+                            person_crop = frame[y1:y2, x1:x2]
+
+                            if self.is_staff(person_crop):
+                                continue  # Ignore staff members
+
+                            label = "Intruder"
+                            color = (0, 0, 255)
+                            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+                            cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+                out.write(frame)
+
+            cap.release()
+            out.release()
+            if frame_count == 0 or not os.path.exists(output_path):
+                raise ValueError("❌ Processing failed: No frames processed or output not created")
+            return output_path
+        except Exception as e:
+            raise ValueError(f"Error in detect_intrusion: {str(e)}")
 
 import cv2
 import numpy as np
@@ -481,6 +570,7 @@ def process_video(feature, video, area=None):
         "Fall Detection": FallDetection,
         "Fight Detection": FightDetection,
         "Intrusion Detection": IntrusionDetection,
+        "Intrusion Detection En" : IntrusionDetectionEn,
         "Loitering Detection": LoiteringDetection,
         "Fire And Smoke Detection": FireAndSmokeDetection
     }
@@ -504,7 +594,7 @@ interface = gr.Interface(
     inputs=[
         gr.Dropdown(choices=[
             "Crowd Detection", "Fall Detection",
-            "Fight Detection", "Intrusion Detection", "Loitering Detection",
+            "Fight Detection", "Intrusion Detection", "Intrusion Detection En", "Loitering Detection",
             "Fire And Smoke Detection"
         ], label="Select Feature"),
         gr.Video(label="Upload Video"),