mkhodary101 commited on
Commit
35960dd
·
verified ·
1 Parent(s): aca5f43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +235 -189
app.py CHANGED
@@ -6,70 +6,96 @@ import time
6
  from ultralytics import YOLO
7
  import spaces
8
  import os
 
 
 
 
 
 
 
 
 
9
 
10
  class CrowdDetection:
11
  def __init__(self, model_path="yolov8n.pt"):
12
- """Initialize the YOLO model once."""
13
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
- if not os.path.exists(model_path):
15
- self.model = YOLO("yolov8n.pt") # Downloads if not present
16
- self.model.save(model_path)
17
- else:
18
- self.model = YOLO(model_path)
19
- self.model.to(self.device)
 
 
 
 
 
 
20
 
21
  @spaces.GPU
22
  def detect_crowd(self, video_path):
23
- """Process video for crowd detection."""
24
- cap = cv2.VideoCapture(video_path)
25
- if not cap.isOpened():
26
- raise ValueError(f"❌ Failed to open video: {video_path}")
27
-
28
- fps = int(cap.get(cv2.CAP_PROP_FPS))
29
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
30
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
31
-
32
- output_path = "output_crowd.mp4"
33
- fourcc = cv2.VideoWriter_fourcc(*"mp4v")
34
- out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
35
- if not out.isOpened():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  cap.release()
37
- raise ValueError(f"❌ Failed to initialize video writer")
38
-
39
- CROWD_THRESHOLD = 10
40
- frame_count = 0
41
-
42
- while cap.isOpened():
43
- ret, frame = cap.read()
44
- if not ret:
45
- break
46
- frame_count += 1
47
-
48
- results = self.model(frame)
49
- person_count = sum(1 for result in results for cls in result.boxes.cls.cpu().numpy() if int(cls) == 0)
50
-
51
- for result in results:
52
- boxes = result.boxes.xyxy.cpu().numpy()
53
- classes = result.boxes.cls.cpu().numpy()
54
- for box, cls in zip(boxes, classes):
55
- if int(cls) == 0: # Person class
56
- x1, y1, x2, y2 = map(int, box)
57
- cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
58
- cv2.putText(frame, "Person", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
59
-
60
- alert_text = "Crowd Alert!" if person_count > CROWD_THRESHOLD else f"People: {person_count}"
61
- cv2.putText(frame, alert_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
62
- (0, 0, 255) if person_count > CROWD_THRESHOLD else (0, 255, 0), 2)
63
- out.write(frame)
64
-
65
- cap.release()
66
- out.release()
67
- if frame_count == 0 or not os.path.exists(output_path):
68
- raise ValueError("❌ Processing failed")
69
- return output_path
70
 
71
  class PeopleTracking:
72
  def __init__(self, yolo_model_path="yolov8n.pt"):
 
73
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
74
  if not os.path.exists(yolo_model_path):
75
  self.model = YOLO("yolov8n.pt")
@@ -80,46 +106,52 @@ class PeopleTracking:
80
 
81
  @spaces.GPU
82
  def track_people(self, video_path):
83
- cap = cv2.VideoCapture(video_path)
84
- if not cap.isOpened():
85
- raise ValueError(f"❌ Failed to open video: {video_path}")
86
-
87
- fps = int(cap.get(cv2.CAP_PROP_FPS))
88
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
89
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
90
- output_path = "output_tracking.mp4"
91
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
92
- if not out.isOpened():
93
- cap.release()
94
- raise ValueError(f"❌ Failed to initialize video writer")
95
-
96
- while cap.isOpened():
97
- ret, frame = cap.read()
98
- if not ret:
99
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
- results = self.model.track(frame, persist=True)
102
- for result in results:
103
- boxes = result.boxes.xyxy.cpu().numpy()
104
- classes = result.boxes.cls.cpu().numpy()
105
- ids = result.boxes.id.cpu().numpy() if result.boxes.id is not None else np.arange(len(boxes))
106
-
107
- for box, cls, obj_id in zip(boxes, classes, ids):
108
- if int(cls) == 0:
109
- x1, y1, x2, y2 = map(int, box)
110
- cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
111
- cv2.putText(frame, f"ID {int(obj_id)}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
112
-
113
- out.write(frame)
114
-
115
- cap.release()
116
- out.release()
117
- if not os.path.exists(output_path):
118
- raise ValueError("❌ Processing failed")
119
- return output_path
120
 
121
  class FallDetection:
122
  def __init__(self, yolo_model_path="yolov8l.pt"):
 
123
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
124
  if not os.path.exists(yolo_model_path):
125
  self.model = YOLO("yolov8l.pt")
@@ -130,56 +162,62 @@ class FallDetection:
130
 
131
  @spaces.GPU
132
  def detect_fall(self, video_path):
133
- cap = cv2.VideoCapture(video_path)
134
- if not cap.isOpened():
135
- raise ValueError(f"❌ Failed to open video: {video_path}")
136
-
137
- fps = int(cap.get(cv2.CAP_PROP_FPS))
138
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
139
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
140
- output_path = "output_fall.mp4"
141
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
142
- if not out.isOpened():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  cap.release()
144
- raise ValueError(f"❌ Failed to initialize video writer")
145
-
146
- while cap.isOpened():
147
- ret, frame = cap.read()
148
- if not ret:
149
- break
150
-
151
- results = self.model(frame)
152
- for result in results:
153
- boxes = result.boxes.xyxy.cpu().numpy()
154
- classes = result.boxes.cls.cpu().numpy()
155
-
156
- for box, cls in zip(boxes, classes):
157
- if int(cls) == 0:
158
- x1, y1, x2, y2 = map(int, box)
159
- width = x2 - x1
160
- height = y2 - y1
161
- aspect_ratio = width / height if height > 0 else float('inf')
162
-
163
- if aspect_ratio > 0.55: # Person lying down
164
- color = (0, 0, 255)
165
- label = "FALL DETECTED"
166
- else:
167
- color = (0, 255, 0)
168
- label = "Standing"
169
-
170
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
171
- cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
172
-
173
- out.write(frame)
174
-
175
- cap.release()
176
- out.release()
177
- if not os.path.exists(output_path):
178
- raise ValueError("❌ Processing failed")
179
- return output_path
180
 
181
  class FightDetection:
182
  def __init__(self, yolo_model_path="yolov8n-pose.pt"):
 
183
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
184
  if not os.path.exists(yolo_model_path):
185
  self.model = YOLO("yolov8n-pose.pt")
@@ -190,56 +228,60 @@ class FightDetection:
190
 
191
  @spaces.GPU
192
  def detect_fight(self, video_path):
193
- cap = cv2.VideoCapture(video_path)
194
- if not cap.isOpened():
195
- raise ValueError(f"❌ Failed to open video: {video_path}")
196
-
197
- fps = int(cap.get(cv2.CAP_PROP_FPS))
198
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
199
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
200
- output_path = "output_fight.mp4"
201
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
202
- if not out.isOpened():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
  cap.release()
204
- raise ValueError(f"❌ Failed to initialize video writer")
205
-
206
- while cap.isOpened():
207
- ret, frame = cap.read()
208
- if not ret:
209
- break
210
-
211
- results = self.model.track(frame, persist=True)
212
- fight_detected = False
213
- person_count = 0
214
-
215
- for result in results:
216
- keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints else []
217
- boxes = result.boxes.xyxy.cpu().numpy() if result.boxes else []
218
- classes = result.boxes.cls.cpu().numpy() if result.boxes else []
219
-
220
- for box, kp, cls in zip(boxes, keypoints, classes):
221
- if int(cls) == 0:
222
- person_count += 1
223
- x1, y1, x2, y2 = map(int, box)
224
- # Simple fight detection: check if arms (keypoints 5, 7) are raised high
225
- if len(kp) > 7 and (kp[5][1] < y1 + (y2 - y1) * 0.3 or kp[7][1] < y1 + (y2 - y1) * 0.3):
226
- fight_detected = True
227
- cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255) if fight_detected else (0, 255, 0), 2)
228
- label = "FIGHT DETECTED" if fight_detected else "Person"
229
- cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
230
- (0, 0, 255) if fight_detected else (0, 255, 0), 2)
231
-
232
- if fight_detected and person_count > 1:
233
- cv2.putText(frame, "FIGHT ALERT!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
234
- out.write(frame)
235
-
236
- cap.release()
237
- out.release()
238
- if not os.path.exists(output_path):
239
- raise ValueError("❌ Processing failed")
240
- return output_path
241
-
242
- # Unified processing function
243
  def process_video(feature, video):
244
  detectors = {
245
  "Crowd Detection": CrowdDetection,
@@ -249,20 +291,24 @@ def process_video(feature, video):
249
  }
250
  try:
251
  detector = detectors[feature]()
252
- method_name = feature.lower().replace(" ", "_") # Match method names exactly
253
  output_path = getattr(detector, method_name)(video)
254
- return output_path
255
  except Exception as e:
256
- raise ValueError(f"Error processing video: {str(e)}")
 
257
 
258
- # Gradio Interface
259
  interface = gr.Interface(
260
  fn=process_video,
261
  inputs=[
262
  gr.Dropdown(choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection"], label="Select Feature"),
263
  gr.Video(label="Upload Video")
264
  ],
265
- outputs=gr.Video(label="Processed Video"),
 
 
 
266
  title="YOLOv8 Multitask Video Processing",
267
  description="Select a feature to process your video: Crowd Detection, People Tracking, Fall Detection, or Fight Detection."
268
  )
 
6
  from ultralytics import YOLO
7
  import spaces
8
  import os
9
import logging

# Configure root logging once at import time so messages show up in the
# Hugging Face Spaces console output.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()],  # stream handler -> visible in Spaces logs
)

# Module-level logger shared by all detector classes in this file.
logger = logging.getLogger(__name__)
18
 
19
class CrowdDetection:
    """Per-frame person detector that overlays a crowd alert on a video.

    Counts YOLO detections of COCO class 0 (person) in each frame and
    writes an annotated copy of the input video; frames with more than
    ``CROWD_THRESHOLD`` people get a red "Crowd Alert!" banner.
    """

    # People per frame above which the alert banner is drawn.
    CROWD_THRESHOLD = 10

    def __init__(self, model_path="yolov8n.pt"):
        """Load the YOLO model once, downloading and caching it if absent."""
        logger.info(f"Initializing CrowdDetection with model: {model_path}")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        try:
            if not os.path.exists(model_path):
                logger.info(f"Model {model_path} not found, downloading...")
                self.model = YOLO("yolov8n.pt")  # ultralytics downloads the weights
                self.model.save(model_path)      # cache locally for the next run
            else:
                self.model = YOLO(model_path)
            self.model.to(self.device)
            logger.info("CrowdDetection model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to initialize model: {str(e)}")
            raise

    @spaces.GPU
    def detect_crowd(self, video_path):
        """Annotate *video_path* with person boxes and a crowd alert.

        Returns the path of the annotated output video.
        Raises ValueError if the video cannot be opened, the writer cannot
        be created, or no frames were processed.
        """
        logger.info(f"Processing video for crowd detection: {video_path}")
        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                logger.error(f"Failed to open video: {video_path}")
                raise ValueError(f"❌ Failed to open video: {video_path}")

            # Some containers report FPS as 0; fall back to 30 so the
            # writer can still be created.
            fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            logger.debug(f"Video specs - FPS: {fps}, Width: {width}, Height: {height}")

            output_path = "output_crowd.mp4"
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            if not out.isOpened():
                cap.release()
                logger.error(f"Failed to initialize video writer for {output_path}")
                raise ValueError("❌ Failed to initialize video writer")

            frame_count = 0
            try:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break
                    frame_count += 1

                    results = self.model(frame)
                    # Count and draw in a single pass instead of iterating
                    # the results twice per frame.
                    person_count = 0
                    for result in results:
                        boxes = result.boxes.xyxy.cpu().numpy()
                        classes = result.boxes.cls.cpu().numpy()
                        for box, cls in zip(boxes, classes):
                            if int(cls) == 0:  # COCO class 0 == person
                                person_count += 1
                                x1, y1, x2, y2 = map(int, box)
                                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                                cv2.putText(frame, "Person", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    logger.debug(f"Frame {frame_count}: Detected {person_count} people")

                    crowded = person_count > self.CROWD_THRESHOLD
                    alert_text = "Crowd Alert!" if crowded else f"People: {person_count}"
                    cv2.putText(frame, alert_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 0, 255) if crowded else (0, 255, 0), 2)
                    out.write(frame)
            finally:
                # Release even if inference raises mid-video so the capture
                # and writer handles are not leaked.
                cap.release()
                out.release()

            if frame_count == 0 or not os.path.exists(output_path):
                logger.error(f"Processing failed: Frames processed: {frame_count}, Output exists: {os.path.exists(output_path)}")
                raise ValueError("❌ Processing failed: No frames processed or output not created")
            logger.info(f"Crowd detection completed, output saved to: {output_path}")
            return output_path
        except Exception as e:
            logger.error(f"Error in detect_crowd: {str(e)}")
            raise
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
  class PeopleTracking:
97
  def __init__(self, yolo_model_path="yolov8n.pt"):
98
+ logger.info(f"Initializing PeopleTracking with model: {yolo_model_path}")
99
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
100
  if not os.path.exists(yolo_model_path):
101
  self.model = YOLO("yolov8n.pt")
 
106
 
107
  @spaces.GPU
108
  def track_people(self, video_path):
109
+ logger.info(f"Tracking people in video: {video_path}")
110
+ try:
111
+ cap = cv2.VideoCapture(video_path)
112
+ if not cap.isOpened():
113
+ raise ValueError(f"❌ Failed to open video: {video_path}")
114
+
115
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
116
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
117
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
118
+ output_path = "output_tracking.mp4"
119
+ out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
120
+ if not out.isOpened():
121
+ cap.release()
122
+ raise ValueError(f"❌ Failed to initialize video writer")
123
+
124
+ while cap.isOpened():
125
+ ret, frame = cap.read()
126
+ if not ret:
127
+ break
128
+
129
+ results = self.model.track(frame, persist=True)
130
+ for result in results:
131
+ boxes = result.boxes.xyxy.cpu().numpy()
132
+ classes = result.boxes.cls.cpu().numpy()
133
+ ids = result.boxes.id.cpu().numpy() if result.boxes.id is not None else np.arange(len(boxes))
134
+
135
+ for box, cls, obj_id in zip(boxes, classes, ids):
136
+ if int(cls) == 0:
137
+ x1, y1, x2, y2 = map(int, box)
138
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
139
+ cv2.putText(frame, f"ID {int(obj_id)}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
140
+
141
+ out.write(frame)
142
 
143
+ cap.release()
144
+ out.release()
145
+ if not os.path.exists(output_path):
146
+ raise ValueError("❌ Processing failed")
147
+ return output_path
148
+ except Exception as e:
149
+ logger.error(f"Error in track_people: {str(e)}")
150
+ raise
 
 
 
 
 
 
 
 
 
 
 
151
 
152
  class FallDetection:
153
  def __init__(self, yolo_model_path="yolov8l.pt"):
154
+ logger.info(f"Initializing FallDetection with model: {yolo_model_path}")
155
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
156
  if not os.path.exists(yolo_model_path):
157
  self.model = YOLO("yolov8l.pt")
 
162
 
163
  @spaces.GPU
164
  def detect_fall(self, video_path):
165
+ logger.info(f"Detecting falls in video: {video_path}")
166
+ try:
167
+ cap = cv2.VideoCapture(video_path)
168
+ if not cap.isOpened():
169
+ raise ValueError(f"❌ Failed to open video: {video_path}")
170
+
171
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
172
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
173
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
174
+ output_path = "output_fall.mp4"
175
+ out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
176
+ if not out.isOpened():
177
+ cap.release()
178
+ raise ValueError(f"❌ Failed to initialize video writer")
179
+
180
+ while cap.isOpened():
181
+ ret, frame = cap.read()
182
+ if not ret:
183
+ break
184
+
185
+ results = self.model(frame)
186
+ for result in results:
187
+ boxes = result.boxes.xyxy.cpu().numpy()
188
+ classes = result.boxes.cls.cpu().numpy()
189
+
190
+ for box, cls in zip(boxes, classes):
191
+ if int(cls) == 0:
192
+ x1, y1, x2, y2 = map(int, box)
193
+ width = x2 - x1
194
+ height = y2 - y1
195
+ aspect_ratio = width / height if height > 0 else float('inf')
196
+
197
+ if aspect_ratio > 0.55:
198
+ color = (0, 0, 255)
199
+ label = "FALL DETECTED"
200
+ else:
201
+ color = (0, 255, 0)
202
+ label = "Standing"
203
+
204
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
205
+ cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
206
+
207
+ out.write(frame)
208
+
209
  cap.release()
210
+ out.release()
211
+ if not os.path.exists(output_path):
212
+ raise ValueError("❌ Processing failed")
213
+ return output_path
214
+ except Exception as e:
215
+ logger.error(f"Error in detect_fall: {str(e)}")
216
+ raise
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
217
 
218
  class FightDetection:
219
  def __init__(self, yolo_model_path="yolov8n-pose.pt"):
220
+ logger.info(f"Initializing FightDetection with model: {yolo_model_path}")
221
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
222
  if not os.path.exists(yolo_model_path):
223
  self.model = YOLO("yolov8n-pose.pt")
 
228
 
229
  @spaces.GPU
230
  def detect_fight(self, video_path):
231
+ logger.info(f"Detecting fights in video: {video_path}")
232
+ try:
233
+ cap = cv2.VideoCapture(video_path)
234
+ if not cap.isOpened():
235
+ raise ValueError(f"❌ Failed to open video: {video_path}")
236
+
237
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
238
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
239
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
240
+ output_path = "output_fight.mp4"
241
+ out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
242
+ if not out.isOpened():
243
+ cap.release()
244
+ raise ValueError(f"❌ Failed to initialize video writer")
245
+
246
+ while cap.isOpened():
247
+ ret, frame = cap.read()
248
+ if not ret:
249
+ break
250
+
251
+ results = self.model.track(frame, persist=True)
252
+ fight_detected = False
253
+ person_count = 0
254
+
255
+ for result in results:
256
+ keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints else []
257
+ boxes = result.boxes.xyxy.cpu().numpy() if result.boxes else []
258
+ classes = result.boxes.cls.cpu().numpy() if result.boxes else []
259
+
260
+ for box, kp, cls in zip(boxes, keypoints, classes):
261
+ if int(cls) == 0:
262
+ person_count += 1
263
+ x1, y1, x2, y2 = map(int, box)
264
+ if len(kp) > 7 and (kp[5][1] < y1 + (y2 - y1) * 0.3 or kp[7][1] < y1 + (y2 - y1) * 0.3):
265
+ fight_detected = True
266
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255) if fight_detected else (0, 255, 0), 2)
267
+ label = "FIGHT DETECTED" if fight_detected else "Person"
268
+ cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
269
+ (0, 0, 255) if fight_detected else (0, 255, 0), 2)
270
+
271
+ if fight_detected and person_count > 1:
272
+ cv2.putText(frame, "FIGHT ALERT!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
273
+ out.write(frame)
274
+
275
  cap.release()
276
+ out.release()
277
+ if not os.path.exists(output_path):
278
+ raise ValueError("❌ Processing failed")
279
+ return output_path
280
+ except Exception as e:
281
+ logger.error(f"Error in detect_fight: {str(e)}")
282
+ raise
283
+
284
+ # Unified processing function with status output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285
def process_video(feature, video):
    """Dispatch *video* to the detector selected by *feature*.

    Returns a ``(status_message, output_path)`` tuple for the Gradio
    interface; on any failure the path is None so the video output
    component stays empty instead of erroring.
    """
    detectors = {
        "Crowd Detection": CrowdDetection,
        "People Tracking": PeopleTracking,
        "Fall Detection": FallDetection,
        "Fight Detection": FightDetection,
    }
    # Validate inputs up front instead of letting them surface as opaque
    # exceptions from deep inside OpenCV / the model.
    if feature not in detectors:
        return f"Error: Unknown feature '{feature}'", None
    if not video:
        return "Error: No video uploaded", None
    try:
        detector = detectors[feature]()
        # Method names mirror the feature labels: "Fall Detection" -> fall_detection... 
        # actually detect_* naming is derived the same way the labels are.
        method_name = feature.lower().replace(" ", "_")
        output_path = getattr(detector, method_name)(video)
        return f"{feature} completed successfully", output_path
    except Exception as e:
        logger.error(f"Error processing video with {feature}: {str(e)}")
        return f"Error: {str(e)}", None
300
 
301
+ # Gradio Interface with dual outputs
302
# Gradio UI: a feature dropdown plus a video upload in, and two outputs —
# a status message and the processed video — matching process_video's
# (status, path) return tuple.
interface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Dropdown(
            choices=["Crowd Detection", "People Tracking", "Fall Detection", "Fight Detection"],
            label="Select Feature",
        ),
        gr.Video(label="Upload Video"),
    ],
    outputs=[
        gr.Textbox(label="Status"),
        gr.Video(label="Processed Video"),
    ],
    title="YOLOv8 Multitask Video Processing",
    description="Select a feature to process your video: Crowd Detection, People Tracking, Fall Detection, or Fight Detection.",
)