mkhodary101 committed (verified)
Commit f97e575 · Parent(s): 6bc6c86

Update app.py

Files changed (1): app.py (+80, −3)
app.py CHANGED
@@ -262,7 +262,7 @@ class IntrusionDetection:
         self.max_intrusion_time = max_intrusion_time
         self.iou_threshold = iou_threshold
         self.conf_threshold = conf_threshold
-    @spaces.GPU
+    @spaces.GPU
 
     def detect_intrusion(self, video_path):
         try:
@@ -320,6 +320,82 @@ class IntrusionDetection:
         except Exception as e:
             raise ValueError(f"Error in detect_intrusion: {str(e)}")
 
+
+class LoiteringDetection:
+    def __init__(self, model_path="yolov8n.pt", loitering_threshold=10, conf_threshold=0.5):
+        self.model_path = model_path
+        self.loitering_threshold = loitering_threshold
+        self.conf_threshold = conf_threshold
+        self.entry_time = {}
+        self.area = [(153, 850), (139, 535), (239, 497), (291, 857)]
+    @spaces.GPU
+    def load_model(self):
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = YOLO(self.model_path).to(device)
+        return model
+
+    def calculate_center(self, box):
+        x1, y1, x2, y2 = box
+        return int((x1 + x2) / 2), int((y1 + y2) / 2)
+
+    def track_time(self, id, frame_duration):
+        if id not in self.entry_time:
+            self.entry_time[id] = {'duration': 0, 'loitering': False}
+        else:
+            self.entry_time[id]['duration'] += frame_duration
+            if self.entry_time[id]['duration'] > self.loitering_threshold:
+                self.entry_time[id]['loitering'] = True
+
+    def detect_loitering(self, video_path):
+        try:
+            model = self.load_model()
+            cap = cv2.VideoCapture(video_path)
+            if not cap.isOpened():
+                raise ValueError(f"❌ Failed to open video: {video_path}")
+
+            fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
+            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+            output_path = "output_loitering.mp4"
+            out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
+            frame_duration = 1 / fps
+            frame_count = 0
+
+            while cap.isOpened():
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                frame_count += 1
+
+                results = model.track(frame, conf=self.conf_threshold, iou=0.1, classes=[0], persist=True)
+                boxes = results[0].boxes.xyxy.cpu().tolist()
+                ids = results[0].boxes.id.cpu().tolist()
+
+                ids_in_area = []
+                for box, id in zip(boxes, ids):
+                    center = self.calculate_center(box)
+                    if cv2.pointPolygonTest(np.array(self.area, np.int32), center, False) >= 0:
+                        ids_in_area.append(id)
+                        self.track_time(id, frame_duration)
+
+                for id in ids_in_area:
+                    color = (0, 0, 255) if self.entry_time.get(id, {}).get('loitering', False) else (0, 255, 0)
+                    cv2.putText(frame, f"ID {id}, Time: {self.entry_time[id]['duration']:.1f}s", (15, 30 + id * 30),
+                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+                pts = np.array(self.area, np.int32).reshape((-1, 1, 2))
+                color = (0, 0, 255) if any(self.entry_time.get(id, {}).get('loitering', False) for id in ids_in_area) else (152, 251, 152)
+                cv2.polylines(frame, [pts], isClosed=True, color=color, thickness=3)
+                out.write(frame)
+
+            cap.release()
+            out.release()
+            if frame_count == 0 or not os.path.exists(output_path):
+                raise ValueError("❌ Processing failed: No frames processed or output not created")
+            return output_path
+        except Exception as e:
+            raise ValueError(f"Error in detect_loitering: {str(e)}")
+
 # Unified processing function with status output
 def process_video(feature, video):
     detectors = {
@@ -327,7 +403,8 @@ def process_video(feature, video):
         "People Tracking": PeopleTracking,
         "Fall Detection": FallDetection,
         "Fight Detection": FightDetection,
-        "Intrusion Detection" : IntrusionDetection
+        "Intrusion Detection" : IntrusionDetection,
+        "Loitering Detection" : LoiteringDetection
     }
     try:
         detector = detectors[feature]()
@@ -341,7 +418,7 @@ def process_video(feature, video):
 interface = gr.Interface(
     fn=process_video,
     inputs=[
-        gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection", "Intrusion Detection"], label="Select Feature"),
+        gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection", "Intrusion Detection", "Loitering Detection"], label="Select Feature"),
     gr.Video(label="Upload Video")
     ],
     outputs=[
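
For context, a minimal usage sketch (not part of the commit): assuming app.py's existing module-level imports (cv2, numpy as np, os, torch, spaces, and YOLO from ultralytics), and a hypothetical local clip named sample.mp4 in which people are visible so the tracker assigns IDs, the new class can be driven directly without going through the Gradio dropdown.

    # Hypothetical smoke test for the newly added class; run it in the same
    # module/session where LoiteringDetection is defined (e.g. a __main__ guard in app.py).
    detector = LoiteringDetection(
        model_path="yolov8n.pt",   # same default weights as in the commit
        loitering_threshold=10,    # seconds a tracked person may stay inside the polygon
        conf_threshold=0.5,
    )
    annotated = detector.detect_loitering("sample.mp4")   # hypothetical input path
    print("Annotated video written to:", annotated)       # -> output_loitering.mp4

The self.area polygon is hard-coded in pixel coordinates for one particular camera view, so the polygon and threshold would presumably need adjusting for other footage; note also that results[0].boxes.id can be None on frames where the tracker has not yet assigned IDs, so a clip with people detected from the start is assumed here.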