mkhodary101 committed
Commit 329c034 · verified · 1 parent: 966c0a8

Update app.py

Files changed (1)
  app.py +152 -0

app.py CHANGED
@@ -1,4 +1,155 @@
  import gradio as gr
+ import torch
+ import cv2
+ import numpy as np
+ from ultralytics import YOLO
+
+ # People tracking: YOLOv8 detections with the tracker built into ultralytics
+ class PeopleTracking:
+     def __init__(self, yolo_model_path="yolov8n.pt"):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = YOLO(yolo_model_path).to(self.device)
+
+     def track_people(self, video_path):
+         cap = cv2.VideoCapture(video_path)
+         output_path = "output_tracking.mp4"
+         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+         out = cv2.VideoWriter(output_path, fourcc, int(cap.get(cv2.CAP_PROP_FPS)),
+                               (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             results = self.model.track(frame, persist=True)
+             for result in results:
+                 boxes = result.boxes.xyxy.cpu().numpy()
+                 classes = result.boxes.cls.cpu().numpy()
+                 # boxes.id always exists but is None when the tracker has no IDs
+                 # for a frame, so test for None rather than hasattr()
+                 ids = result.boxes.id.cpu().numpy() if result.boxes.id is not None else np.arange(len(boxes))
+
+                 for box, cls, obj_id in zip(boxes, classes, ids):
+                     if int(cls) == 0:  # COCO class 0 = person
+                         x1, y1, x2, y2 = map(int, box)
+                         cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
+                         cv2.putText(frame, f"ID {int(obj_id)}", (x1, y1 - 10),
+                                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
+
+             out.write(frame)
+
+         cap.release()
+         out.release()
+         return output_path
+
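
A quick way to sanity-check the tracker without the Gradio UI is to call the class directly; a minimal sketch, assuming a local clip named sample.mp4 (hypothetical file):

    tracker = PeopleTracking()                 # downloads yolov8n.pt on first run
    out = tracker.track_people("sample.mp4")   # hypothetical input clip
    print("annotated video written to", out)
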
+ # Fall detection: flag a person whose box is wide relative to its height
+ class FallDetection:
+     def __init__(self, yolo_model_path="yolov8l.pt"):
+         self.model = YOLO(yolo_model_path)
+
+     def detect_fall(self, video_path):
+         cap = cv2.VideoCapture(video_path)
+         output_path = "output_fall.mp4"
+         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+         out = cv2.VideoWriter(output_path, fourcc, int(cap.get(cv2.CAP_PROP_FPS)),
+                               (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             results = self.model(frame)
+             for result in results:
+                 boxes = result.boxes.xyxy.cpu().numpy()
+                 classes = result.boxes.cls.cpu().numpy()
+
+                 for box, cls in zip(boxes, classes):
+                     if int(cls) == 0:  # COCO class 0 = person
+                         x1, y1, x2, y2 = map(int, box)
+                         width = x2 - x1
+                         height = y2 - y1
+                         if height <= 0:  # guard against degenerate boxes
+                             continue
+                         aspect_ratio = width / height
+
+                         # A box wider than ~0.55 of its height is treated as a fall
+                         if aspect_ratio > 0.55:
+                             color = (0, 0, 255)
+                             label = "FALL DETECTED"
+                         else:
+                             color = (0, 255, 0)
+                             label = "Standing"
+
+                         cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+                         cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+             out.write(frame)
+
+         cap.release()
+         out.release()
+         return output_path
+
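
The fall rule is pure box geometry: a person lying down yields a detection box that is wide relative to its height. The same test in isolation, with the 0.55 threshold from the code above and two worked boxes:

    def is_fall(x1, y1, x2, y2, threshold=0.55):
        width, height = x2 - x1, y2 - y1
        return height > 0 and (width / height) > threshold

    print(is_fall(100, 100, 160, 260))  # 60/160 = 0.375 -> False (upright)
    print(is_fall(100, 200, 340, 320))  # 240/120 = 2.0  -> True (prone)
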
+ # Fight detection: pose model with tracking; draws a box over each detected pose
+ class FightDetection:
+     def __init__(self, yolo_model_path="yolov8n-pose.pt"):
+         self.model = YOLO(yolo_model_path).to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
+
+     def detect_fight(self, video_path):
+         cap = cv2.VideoCapture(video_path)
+         output_path = "output_fight.mp4"
+         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+         out = cv2.VideoWriter(output_path, fourcc, int(cap.get(cv2.CAP_PROP_FPS)),
+                               (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             results = self.model.track(frame, persist=True)
+             for result in results:
+                 # keypoints/boxes are None when nothing is detected in the frame
+                 keypoints = result.keypoints.xy.cpu().numpy() if result.keypoints is not None else []
+                 classes = result.boxes.cls.cpu().numpy() if result.boxes is not None else []
+
+                 for kp, cls in zip(keypoints, classes):
+                     if int(cls) == 0:  # COCO class 0 = person
+                         # Box drawn from the first keypoint to the last keypoint
+                         x1, y1 = int(kp[0][0]), int(kp[0][1])
+                         x2, y2 = int(kp[-1][0]), int(kp[-1][1])
+                         cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
+                         cv2.putText(frame, "FIGHT DETECTED", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
+
+             out.write(frame)
+
+         cap.release()
+         out.release()
+         return output_path
+
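
Note that the box above spans only the first and last keypoints, which in COCO keypoint order are the nose and the right ankle, so it is not a full bounding box. A tighter box can be taken over all keypoints; a sketch (keypoints_bbox is a hypothetical helper, kp is one (num_keypoints, 2) array from result.keypoints.xy):

    def keypoints_bbox(kp):
        xs, ys = kp[:, 0], kp[:, 1]
        return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())
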
+ # Process a video with the detector selected in the UI.
+ # Feature names map explicitly to (class, method) pairs, since the method
+ # names do not follow a single derivable pattern.
+ def process_video(feature, video):
+     detectors = {
+         "People Tracking": (PeopleTracking, "track_people"),
+         "Fall Detection": (FallDetection, "detect_fall"),
+         "Fight Detection": (FightDetection, "detect_fight"),
+     }
+     detector_cls, method_name = detectors[feature]
+     return getattr(detector_cls(), method_name)(video)
+
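
For a headless smoke test, process_video can be called directly with a feature name and a file path; a sketch with a hypothetical clip:

    out_path = process_video("Fall Detection", "clips/test_fall.mp4")  # hypothetical path
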
+ # Gradio interface
+ interface = gr.Interface(
+     fn=process_video,
+     inputs=[
+         gr.Dropdown(choices=["People Tracking", "Fall Detection", "Fight Detection"], label="Select Feature"),
+         gr.Video(label="Upload Video")
+     ],
+     outputs=gr.Video(label="Processed Video"),
+     title="YOLOv8 Multitask Video Processing"
+ )
+
+ if __name__ == "__main__":
+     interface.launch()
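
launch() also accepts the standard Gradio server options if the Space needs a fixed host or port; a sketch with illustrative values:

    interface.launch(server_name="0.0.0.0", server_port=7860, share=False)
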
+
+ """
+ import gradio as gr
  import cv2
  import numpy as np
  import os
@@ -92,3 +243,4 @@

  if __name__ == "__main__":
      interface.launch()
+ """