mkhodary101 committed on
Commit
0348375
·
verified ·
1 Parent(s): 16b5fca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -16
app.py CHANGED
@@ -215,72 +215,134 @@ class FallDetection:
215
  raise ValueError(f"Error in fall_detection: {str(e)}")
216
 
217
 
 
 
 
 
 
 
 
218
class FightDetection:
    """Flag potential fights in a video using a raised-arm heuristic on YOLOv8 pose output.

    A frame is marked as a fight when more than one person is present and at
    least one person has an upper-body keypoint in the top 30% of their
    bounding box (presumably a raised arm — keypoint order should be confirmed
    against the COCO pose layout).
    """

    def __init__(self, yolo_model_path="yolov8n-pose.pt"):
        # Path to the YOLOv8 pose weights; downloaded and cached on first use.
        self.model_path = yolo_model_path

    @spaces.GPU
    def fight_detect(self, video_path):
        """Process ``video_path``, write an annotated video, and return its path.

        Returns:
            str: path of the annotated output video ("output_fight.mp4").

        Raises:
            ValueError: wrapping any failure (unreadable video, writer init
                failure, model/tracking errors).
        """
        try:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

            # Load (and cache) the pose model, then move it to the chosen device.
            if not os.path.exists(self.model_path):
                model = YOLO("yolov8n-pose.pt")
                model.save(self.model_path)
            else:
                model = YOLO(self.model_path)
            model.to(device)

            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"❌ Failed to open video: {video_path}")

            # Clamp to >= 1: CAP_PROP_FPS can report 0 for some containers,
            # and VideoWriter rejects a zero frame rate.
            fps = max(1, int(cap.get(cv2.CAP_PROP_FPS)))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) * 0.5)
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.5)
            output_path = "output_fight.mp4"

            out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
            if not out.isOpened():
                cap.release()
                raise ValueError(f"❌ Failed to initialize video writer")

            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break  # end of video

                frame = cv2.resize(frame, (width, height))
                results = model.track(frame, persist=True)

                # Per-frame state: the alert requires both a raised-arm hit
                # and more than one person in the frame.
                fight_detected = False
                person_count = 0

                for result in results:
                    keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints else []
                    boxes = result.boxes.xyxy.cpu().numpy() if result.boxes else []
                    classes = result.boxes.cls.cpu().numpy() if result.boxes else []

                    for box, kp, cls in zip(boxes, keypoints, classes):
                        if int(cls) == 0:  # person class
                            person_count += 1
                            x1, y1, x2, y2 = map(int, box)
                            # Upper-body keypoint (index 5 or 7) above the top
                            # 30% of the box => raised-arm heuristic.
                            if len(kp) > 7 and (kp[5][1] < y1 + (y2 - y1) * 0.3 or kp[7][1] < y1 + (y2 - y1) * 0.3):
                                fight_detected = True
                            cv2.rectangle(frame, (x1, y1), (x2, y2),
                                          (0, 0, 255) if fight_detected else (0, 255, 0), 2)
                            label = "FIGHT DETECTED" if fight_detected else "Person"
                            cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (0, 0, 255) if fight_detected else (0, 255, 0), 2)

                if fight_detected and person_count > 1:
                    cv2.putText(frame, "FIGHT ALERT!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                out.write(frame)

            cap.release()
            out.release()

            if not os.path.exists(output_path):
                raise ValueError("❌ Processing failed")

            return output_path

        except Exception as e:
            # Preserve the original traceback for debugging while keeping the
            # ValueError interface callers expect.
            raise ValueError(f"Error in fight_detection: {str(e)}") from e
283
 
 
284
  class IntrusionDetection:
285
  def __init__(self, model_path="yolov8n.pt", max_intrusion_time=300, iou_threshold=0.5, conf_threshold=0.5):
286
  self.model_path = model_path
 
215
  raise ValueError(f"Error in fall_detection: {str(e)}")
216
 
217
 
218
+ import os
219
+ import cv2
220
+ import time
221
+ import torch
222
+ import numpy as np
223
+ from ultralytics import YOLO
224
+
225
class FightDetection:
    """Detect fight-like interactions in a video via YOLOv8 pose tracking.

    Two tracked people are flagged as fighting when they are close together
    and both show fast hand/arm movement between consecutive processed frames.
    """

    def __init__(self, yolo_model_path="yolov8n-pose.pt"):
        # Path to the YOLOv8 pose weights; downloaded and cached on first use.
        self.model_path = yolo_model_path

    @spaces.GPU
    def fight_detect(self, video_path):
        """Process ``video_path``, write an annotated video, and return its path.

        Returns:
            str: path of the annotated output video ("output_fight.mp4").

        Raises:
            ValueError: wrapping any failure (unreadable video, writer init
                failure, model/tracking errors).
        """
        try:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

            # Load YOLO Pose model (download and cache it on first run).
            if not os.path.exists(self.model_path):
                model = YOLO("yolov8n-pose.pt")
                model.save(self.model_path)
            else:
                model = YOLO(self.model_path)
            model.to(device)

            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"❌ Failed to open video: {video_path}")

            # Halve the FPS to slow playback for better tracking, but clamp to
            # >= 1: CAP_PROP_FPS can report 0/1, and VideoWriter rejects 0 fps.
            fps = max(1, int(cap.get(cv2.CAP_PROP_FPS)) // 2)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) * 0.5)
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.5)
            output_path = "output_fight.mp4"

            out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
            if not out.isOpened():
                cap.release()
                raise ValueError(f"❌ Failed to initialize video writer")

            # Fight detection parameters
            FIGHT_THRESHOLD = 2.0      # min hand speed (px per processed frame) for BOTH people
            PROXIMITY_THRESHOLD = 100  # max distance (px) between keypoint 0 of a pair
            frame_skip = 2             # process every 2nd frame for performance
            frame_count = 0
            person_movements = {}      # person_id -> [(timestamp, keypoints), ...] (last 2 kept)

            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break  # End of video

                frame_count += 1
                if frame_count % frame_skip != 0:
                    continue  # Skip frames for performance

                frame = cv2.resize(frame, (width, height))
                results = model.track(frame, persist=True)

                current_time = time.time()
                persons = []

                for result in results:
                    keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints else []
                    classes = result.boxes.cls.cpu().numpy() if result.boxes else []
                    # Guard the whole chain: result.boxes itself may be None,
                    # so checking only result.boxes.id would raise AttributeError.
                    ids = (result.boxes.id.cpu().numpy()
                           if result.boxes is not None and result.boxes.id is not None
                           else [])

                    for i, (kp, cls) in enumerate(zip(keypoints, classes)):
                        if int(cls) == 0:  # Person class
                            # Fall back to a position-derived id when the
                            # tracker did not assign one.
                            person_id = int(ids[i]) if len(ids) > i else f"{int(kp[0][0])}-{int(kp[0][1])}"
                            persons.append((person_id, kp))

                            if person_id not in person_movements:
                                person_movements[person_id] = []
                            person_movements[person_id].append((current_time, kp))
                            # Only the previous sample ([-2]) is ever read below;
                            # trim the history so memory stays bounded on long videos.
                            if len(person_movements[person_id]) > 2:
                                del person_movements[person_id][:-2]

                            # Draw keypoints
                            for point in kp:
                                x, y = int(point[0]), int(point[1])
                                cv2.circle(frame, (x, y), 5, (255, 255, 0), -1)

                # Check for fights: every close pair where both people's
                # hand keypoints moved fast since the previous processed frame.
                fight_detected = False
                for i in range(len(persons)):
                    for j in range(i + 1, len(persons)):
                        person1, kp1 = persons[i]
                        person2, kp2 = persons[j]

                        distance = np.linalg.norm(kp1[0] - kp2[0])
                        if distance > PROXIMITY_THRESHOLD:
                            continue  # Ignore if too far apart

                        # Require a previous sample for both people AND enough
                        # keypoints to index 7/8 safely (fancy indexing would
                        # raise IndexError on a truncated pose).
                        # NOTE(review): indices 7/8 look like elbows in COCO
                        # keypoint order, not wrists — confirm intent.
                        if (len(person_movements[person1]) > 1 and len(person_movements[person2]) > 1
                                and len(kp1) > 8 and len(kp2) > 8):
                            hands1 = np.mean(kp1[[7, 8]], axis=0)
                            hands2 = np.mean(kp2[[7, 8]], axis=0)

                            prev_hands1 = person_movements[person1][-2][1][[7, 8]].mean(axis=0)
                            prev_hands2 = person_movements[person2][-2][1][[7, 8]].mean(axis=0)

                            speed1 = np.linalg.norm(hands1 - prev_hands1)
                            speed2 = np.linalg.norm(hands2 - prev_hands2)

                            if speed1 > FIGHT_THRESHOLD and speed2 > FIGHT_THRESHOLD:
                                fight_detected = True
                                x1, y1 = int(kp1[0][0]), int(kp1[0][1])
                                x2, y2 = int(kp2[0][0]), int(kp2[0][1])
                                cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
                                cv2.putText(frame, "FIGHT DETECTED", (x1, y1 - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

                if fight_detected:
                    cv2.putText(frame, "FIGHT ALERT!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                out.write(frame)

            cap.release()
            out.release()

            if not os.path.exists(output_path):
                raise ValueError("❌ Processing failed")

            return output_path

        except Exception as e:
            # Preserve the original traceback for debugging while keeping the
            # ValueError interface callers expect.
            raise ValueError(f"Error in fight_detection: {str(e)}") from e
344
 
345
+
346
  class IntrusionDetection:
347
  def __init__(self, model_path="yolov8n.pt", max_intrusion_time=300, iou_threshold=0.5, conf_threshold=0.5):
348
  self.model_path = model_path