randomshit11 committed on
Commit
28b8eac
·
verified ·
1 Parent(s): 95f220f

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +179 -0
  2. app_p.py +205 -0
app.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
+ import streamlit as st
3
+ import cv2
4
+ from collections import defaultdict
5
+ import supervision as sv
6
+ from ultralytics import YOLO
7
+ import os
8
+
9
def ensure_dir(file_path):
    """Create directory *file_path* (and any parents) if it does not exist.

    A falsy path (e.g. the '' returned by os.path.dirname for a bare
    filename) is a no-op instead of crashing os.makedirs.
    """
    if file_path:
        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(file_path, exist_ok=True)
12
+
13
def process_video(input_video_path, output_video_path):
    """Track class-19 objects in a video and count crossings of a vertical line.

    Reads *input_video_path*, runs YOLOv8 tracking frame by frame, highlights
    objects whose track crosses the vertical line at x == 1250, overlays a
    running crossing count, and writes the annotated H.264 video to
    *output_video_path*. Returns None.
    """
    model = YOLO('yolov8x-seg.pt')
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        # Nothing to process; also prevents a NameError on an unopened writer.
        return

    # Vertical counting line at x == 1250, spanning the frame height.
    START = sv.Point(1250, -2)
    END = sv.Point(1250, 1070)
    track_history = defaultdict(list)  # track_id -> list of x centroids
    crossed_objects = {}               # track_id -> True once it has crossed

    ensure_dir(os.path.dirname(output_video_path))

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Preserve the source frame rate instead of hard-coding it; fall back to
    # 20 fps when the container does not report one.
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
    # H.264 codec ('X264' is an alternative if 'avc1' is unavailable).
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height), True)

    try:
        while True:
            success, frame = cap.read()
            if not success:
                break

            # save=True dropped: it made ultralytics write its own output files
            # for every frame as a side effect of tracking.
            results = model.track(frame, conf=0.3, classes=[19], persist=True,
                                  tracker="bytetrack.yaml")
            annotated_frame = results[0].plot() if hasattr(results[0], 'plot') else frame

            # boxes.id is None on frames with no tracked detections; guard to
            # avoid an AttributeError and still emit the annotated frame.
            if results[0].boxes.id is not None:
                boxes = results[0].boxes.xywh.cpu()
                track_ids = results[0].boxes.id.int().cpu().tolist()

                for box, track_id in zip(boxes, track_ids):
                    x, y, w, h = box
                    track = track_history[track_id]
                    # Store a plain float rather than a 0-d tensor.
                    track.append(float(x))

                    # Need at least two samples to detect a crossing in
                    # either direction.
                    if len(track) > 1:
                        if track[-2] <= START.x < track[-1] or track[-2] >= START.x > track[-1]:
                            if track_id not in crossed_objects:
                                crossed_objects[track_id] = True
                            # Highlight the crossing object; ultralytics xywh
                            # boxes are center-based, so derive the corners.
                            cv2.rectangle(annotated_frame,
                                          (int(x - w / 2), int(y - h / 2)),
                                          (int(x + w / 2), int(y + h / 2)),
                                          (0, 255, 0), 2)

            cv2.line(annotated_frame, (START.x, START.y), (END.x, END.y), (0, 255, 0), 2)
            count_text = f"Objects crossed: {len(crossed_objects)}"
            cv2.putText(annotated_frame, count_text, (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 4)

            out.write(annotated_frame)
    finally:
        # Release capture and writer even if tracking raises mid-stream.
        cap.release()
        out.release()
61
+
62
def main():
    """Streamlit UI: upload a video, run line-crossing tracking, offer download."""
    st.title("Video Processing for Object Tracking")
    uploaded = st.file_uploader("Upload a video", type=['mp4', 'avi'])

    if uploaded is None:
        return

    out_dir = "output_videos"
    ensure_dir(out_dir)
    input_path = os.path.join(out_dir, "uploaded_video.mp4")
    result_path = os.path.join(out_dir, "processed_video.mp4")

    # Persist the upload to disk so OpenCV can read it by path.
    with open(input_path, "wb") as f:
        f.write(uploaded.getbuffer())

    if st.button("Process Video"):
        process_video(input_path, result_path)
        st.video(result_path)

        with open(result_path, "rb") as file:
            st.download_button(
                label="Download processed video",
                data=file,
                file_name="processed_video.mp4",
                mime="video/mp4",
            )


if __name__ == "__main__":
    main()
90
app_p.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
+ import streamlit as st
3
+ import cv2
4
+ import numpy as np
5
+ import supervision as sv
6
+ from ultralytics import YOLO
7
+ import os
8
+ from collections import defaultdict
9
+
10
def ensure_dir(file_path):
    """Create directory *file_path* (and any parents) if it does not exist.

    A falsy path (e.g. the '' returned by os.path.dirname for a bare
    filename) is a no-op instead of crashing os.makedirs.
    """
    if file_path:
        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(file_path, exist_ok=True)
13
+
14
def is_point_in_polygon(point, polygon):
    """Return True when *point* lies strictly inside *polygon* (edges excluded)."""
    px, py = int(point[0]), int(point[1])
    # pointPolygonTest with measureDist=False returns +1 inside, 0 on the
    # edge, -1 outside; strict > 0 keeps boundary points out.
    return cv2.pointPolygonTest(polygon, (px, py), False) > 0
16
+
17
def process_video(input_video_path, output_video_path, polygons):
    """Detect intruders (any class except 19) entering restricted polygons.

    Runs YOLOv8 tracking on *input_video_path*, flags every tracked object of
    a disallowed class whose box center lies inside one of *polygons*, and
    writes the annotated video (zone outlines, intruder boxes, running count)
    to *output_video_path*. Returns None.
    """
    model = YOLO('yolov8x-seg.pt')
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        # Nothing to process; also prevents a NameError on an unopened writer.
        return

    intruder_ids = set()  # unique track ids ever seen inside a zone

    ensure_dir(os.path.dirname(output_video_path))

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Preserve the source frame rate; fall back to 20 fps when unreported.
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
    fourcc = cv2.VideoWriter_fourcc(*'avc1')  # H.264
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height), True)

    poly_arrays = [np.array(polygon, np.int32) for polygon in polygons]
    try:
        while True:
            success, frame = cap.read()
            if not success:
                break

            # save=True dropped: it made ultralytics write its own output files
            # for every frame as a side effect of tracking.
            results = model.track(frame, conf=0.6, classes=None, persist=True,
                                  tracker="bytetrack.yaml")
            annotated_frame = results[0].plot() if hasattr(results[0], 'plot') else frame

            for poly_array in poly_arrays:
                cv2.polylines(annotated_frame, [poly_array], True, (0, 255, 0), 3)

            # boxes.id is None on frames with no tracked detections; guard to
            # avoid an AttributeError and still emit the annotated frame.
            if results[0].boxes.id is not None:
                boxes = results[0].boxes.xywh.cpu()
                track_ids = results[0].boxes.id.int().cpu().tolist()
                classes = results[0].boxes.cls.int().cpu().numpy()

                for box, track_id, cls in zip(boxes, track_ids, classes):
                    x, y, w, h = box
                    # Ultralytics xywh boxes are CENTER-based: (x, y) already
                    # is the box center, so use it directly as the centroid
                    # (the old x + w/2 shifted the test point by half a box).
                    centroid = (x, y)

                    if cls != 19:  # class 19 is the permitted class
                        for poly_array in poly_arrays:
                            if is_point_in_polygon(centroid, poly_array):
                                if track_id not in intruder_ids:
                                    intruder_ids.add(track_id)
                                # Corners derived from the center-based box.
                                cv2.rectangle(annotated_frame,
                                              (int(x - w / 2), int(y - h / 2)),
                                              (int(x + w / 2), int(y + h / 2)),
                                              (0, 0, 255), 2)
                                cv2.putText(annotated_frame, f"Intruder ID: {track_id}",
                                            (int(x - w / 2), int(y - h / 2) - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                                break

            intrusion_text = f"Total Intruders Detected: {len(intruder_ids)}"
            cv2.putText(annotated_frame, intrusion_text, (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

            out.write(annotated_frame)
    finally:
        # Release capture and writer even if tracking raises mid-stream.
        cap.release()
        out.release()
66
+
67
def main():
    """Streamlit UI: upload a video, run intrusion detection, offer download."""
    st.title("Video Processing for Intrusion Detection")
    uploaded = st.file_uploader("Upload a video", type=['mp4', 'avi'])

    if uploaded is None:
        return

    out_dir = "output_videos"
    ensure_dir(out_dir)
    input_path = os.path.join(out_dir, "uploaded_video.mp4")
    result_path = os.path.join(out_dir, "processed_video.mp4")

    # Persist the upload to disk so OpenCV can read it by path.
    with open(input_path, "wb") as f:
        f.write(uploaded.getbuffer())

    # Restricted zone: a single closed polygon (first vertex repeated last).
    polygons = [
        np.array([[110, 70], [1754, 62], [1754, 1062], [138, 1066], [110, 70]]),
    ]

    if st.button("Process Video"):
        process_video(input_path, result_path, polygons)
        st.video(result_path)

        with open(result_path, "rb") as file:
            st.download_button(
                label="Download processed video",
                data=file,
                file_name="processed_video.mp4",
                mime="video/mp4",
            )


if __name__ == "__main__":
    main()
103