Update app.py
app.py
CHANGED
@@ -321,107 +321,132 @@ class IntrusionDetection:
             raise ValueError(f"Error in detect_intrusion: {str(e)}")
 
 
-    def __init__(self, model_path="yolov8n.pt", loitering_threshold=10, conf_threshold=0.5):
-        self.model_path = model_path
-        self.loitering_threshold = loitering_threshold
-        self.conf_threshold = conf_threshold
-        self.entry_time = {}
-        self.area = [(153, 850), (139, 535), (239, 497), (291, 857)]
+import cv2
+import numpy as np
+from ultralytics import YOLO
+from shapely.geometry import Point, Polygon
+import time
+
+class LoiteringDetector:
+    def __init__(self, model_path='loitering_model.pt'):
+        self.model = YOLO(model_path)
 
     @spaces.GPU
-        return model
-        return int((x1 + x2) / 2), int((y1 + y2) / 2)
-        self.entry_time[id]['duration'] += frame_duration
-        if self.entry_time[id]['duration'] > self.loitering_threshold:
-            self.entry_time[id]['loitering'] = True
-        try:
-            model = self.load_model()
-            cap = cv2.VideoCapture(video_path)
-            if not cap.isOpened():
-                raise ValueError(f"❌ Failed to open video: {video_path}")
-            out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
-            frame_duration = 1 / fps
-            frame_count = 0
-                ids = results[0].boxes.id.cpu().tolist()
-                    center = self.calculate_center(box)
-                    if cv2.pointPolygonTest(np.array(self.area, np.int32), center, False) >= 0:
-                        ids_in_area.append(id)
-                        self.track_time(id, frame_duration)
-
-                for id in ids_in_area:
-                    color = (0, 0, 255) if self.entry_time.get(id, {}).get('loitering', False) else (0, 255, 0)
-                    cv2.putText(frame, f"ID {id}, Time: {self.entry_time[id]['duration']:.1f}s", (15, 30 + id * 30),
-                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
-
-                pts = np.array(self.area, np.int32).reshape((-1, 1, 2))
-                color = (0, 0, 255) if any(self.entry_time.get(id, {}).get('loitering', False) for id in ids_in_area) else (152, 251, 152)
-                cv2.polylines(frame, [pts], isClosed=True, color=color, thickness=3)
-                out.write(frame)
+    def loitering_detect(self, video_path, area):
+        # Create polygon zone
+        time_threshold = 7
+        detection_threshold = 0.6
+        zone_points = None
+        if area == '131':
+            zone_points = [(842//1.5, 514//1.7), (686//1.5, 290//1.7), (775//1.5, 279//1.7), (961//1.5, 488//1.7)]
+        elif area == '145':
+            zone_points = [(153, 850), (139, 535), (239, 497), (291, 857)]
+        zone = Polygon(zone_points)
+
+        # Open video
+        cap = cv2.VideoCapture(video_path)
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) * 0.5)
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.5)
+        fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+        # Create video writer
+        output_path = os.path.join(tempfile.gettempdir(), "loitering_video.mp4")
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            frame = cv2.resize(frame, (width, height))
+            # Perform object detection and tracking
+            results = self.model.track(frame, persist=True, classes=[0], conf=detection_threshold)  # 0 is the class ID for person
+
+            # List to store time information for display
+            time_display = []
+
+            if results[0].boxes.id is not None:
+                boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
+                ids = results[0].boxes.id.cpu().numpy().astype(int)
+
+                for box, id in zip(boxes, ids):
+                    x1, y1, x2, y2 = box
+                    center = Point((x1 + x2) / 2, (y1 + y2) / 2)
+
+                    if id not in person_info:
+                        person_info[id] = {'in_zone': False, 'start_time': None, 'duration': 0}
+
+                    if zone.contains(center):
+                        if not person_info[id]['in_zone']:
+                            person_info[id]['in_zone'] = True
+                            person_info[id]['start_time'] = time.time()
+
+                        person_info[id]['duration'] = time.time() - person_info[id]['start_time']
+
+                        if person_info[id]['duration'] > time_threshold:
+                            color = (0, 0, 255)  # Red for loitering
+                        else:
+                            color = (0, 255, 0)  # Green for in zone
+
+                        time_display.append(f"ID: {id}, Time: {person_info[id]['duration']:.2f}s")
+                    else:
+                        person_info[id]['in_zone'] = False
+                        person_info[id]['start_time'] = None
+                        person_info[id]['duration'] = 0
+                        color = (255, 0, 0)  # Blue for outside zone
+
+                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+                    cv2.putText(frame, f"ID: {id}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+            # Draw polygon zone
+            cv2.polylines(frame, [np.array(zone_points, np.int32)], True, (255, 255, 0), 2)
+
+            # Display time information in top left
+            for i, text in enumerate(time_display):
+                cv2.putText(frame, text, (10, 30 + i * 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
+
+            out.write(frame)
+
+        cap.release()
+        out.release()
+
+        return output_path
+
+
+def process_video(feature, video, area=None):
     detectors = {
         "Crowd Detection": CrowdDetection,
         "People Tracking": PeopleTracking,
         "Fall Detection": FallDetection,
         "Fight Detection": FightDetection,
-        "Intrusion Detection"
-        "Loitering Detection"
+        "Intrusion Detection": IntrusionDetection,
+        "Loitering Detection": LoiteringDetection
     }
+
     try:
         detector = detectors[feature]()
         method_name = feature.lower().replace(" ", "_").replace("detection", "detect")  # Ensures correct method name
+
+        if feature == "Loitering Detection":
+            output_path = detector.detect_loitering(video, area)  # Pass area if required
+        else:
+            output_path = getattr(detector, method_name)(video)
+
         return f"{feature} completed successfully", output_path
     except Exception as e:
         return f"Error: {str(e)}", None
 
-# Gradio Interface with
+# Gradio Interface with additional input for Loitering Detection
 interface = gr.Interface(
     fn=process_video,
     inputs=[
-        gr.Dropdown(choices=[
+        gr.Dropdown(choices=[
+            "Crowd Detection", "People Tracking", "Fall Detection",
+            "Fight Detection", "Intrusion Detection", "Loitering Detection"
+        ], label="Select Feature"),
+        gr.Video(label="Upload Video"),
+        gr.Textbox(label="Loitering Area (131 or 145)")  # Can be replaced with a drawing tool
     ],
     outputs=[
         gr.Textbox(label="Status"),
@@ -432,4 +457,4 @@ interface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    interface.launch(debug=True)
+    interface.launch(debug=True)
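A few wiring details in the added code look inconsistent as committed and would likely fail at runtime unless the missing names are defined elsewhere in app.py: the detectors dict registers LoiteringDetection while the new class is declared LoiteringDetector; process_video calls detector.detect_loitering(video, area) while the method is named loitering_detect; person_info is read inside the frame loop but never created; and zone_points stays None (so Polygon(None) is built) when area is neither "131" nor "145". A minimal, hypothetical reconciliation sketch, not part of this commit:

    # Hypothetical fix-up sketch; names follow the classes shown in the diff above.
    detectors = {
        "Crowd Detection": CrowdDetection,
        "People Tracking": PeopleTracking,
        "Fall Detection": FallDetection,
        "Fight Detection": FightDetection,
        "Intrusion Detection": IntrusionDetection,
        "Loitering Detection": LoiteringDetector,   # match the class name actually defined
    }

    def process_video(feature, video, area=None):
        try:
            detector = detectors[feature]()
            if feature == "Loitering Detection":
                # call the method the class defines (loitering_detect), not detect_loitering
                output_path = detector.loitering_detect(video, area)
            else:
                method_name = feature.lower().replace(" ", "_").replace("detection", "detect")
                output_path = getattr(detector, method_name)(video)
            return f"{feature} completed successfully", output_path
        except Exception as e:
            return f"Error: {str(e)}", None

Inside LoiteringDetector, creating the per-track state up front (for example self.person_info = {} in __init__, or person_info = {} before the while loop) and raising a clear error for unknown area values would remove the remaining undefined names.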
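Also worth noting: the removed implementation accumulated dwell time in video time (frame_duration = 1 / fps added per processed frame), whereas the added loitering_detect uses time.time(), i.e. wall-clock processing time, so the measured duration depends on inference speed rather than on the footage itself. A small illustrative sketch (assumptions: a shapely zone as in the diff, fps read from cv2.CAP_PROP_FPS, and a hypothetical helper name) of keeping frame-based accounting with the new polygon check:

    from shapely.geometry import Point, Polygon

    def update_dwell(person_info, track_id, center_xy, zone, frame_duration, time_threshold=7.0):
        """Accumulate per-track dwell time in video seconds while the center is inside the zone."""
        info = person_info.setdefault(track_id, {'duration': 0.0, 'loitering': False})
        if zone.contains(Point(center_xy)):
            info['duration'] += frame_duration        # video time, independent of inference speed
            info['loitering'] = info['duration'] > time_threshold
        else:
            info['duration'] = 0.0
            info['loitering'] = False
        return info

    # usage inside the per-frame loop (illustrative):
    #   info = update_dwell(person_info, id, ((x1 + x2) / 2, (y1 + y2) / 2), zone, 1 / fps)
    #   color = (0, 0, 255) if info['loitering'] else (0, 255, 0)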