Harinvnukala committed
Commit b81ac5a · verified · 1 Parent(s): 28afbfc

Upload app.py

Files changed (1)
  1. app.py +173 -0
app.py ADDED
@@ -0,0 +1,173 @@
+ import cv2
+ from ultralytics import YOLO, solutions
+ import torch
+ import numpy as np
+ from collections import defaultdict
+ import gradio as gr
+ import tempfile
+ import os
+
+ device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
+ print("Device:", device)
+
+ # Load MiDaS model for depth estimation
+ midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small")
+ midas.to(device)
+ midas.eval()
+ midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms").small_transform
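+ # Note: MiDaS_small predicts *relative* inverse depth, not metric depth;
+ # small_transform performs the resizing/normalization the network expects.
+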
+ # Load YOLO model
+ model = YOLO('yolov8x.pt')
+ names = model.model.names
+ model.to(device)
+
+ pixels_per_meter = 300
+ unattended_threshold = 2.0  # meters
+
+ dist_obj = solutions.DistanceCalculation(names=names, view_img=False, pixels_per_meter=pixels_per_meter)
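+ # DistanceCalculation converts pixel distances to meters via pixels_per_meter.
+ # Note: the calculate_distance/calculate_centroid methods used below match the
+ # ultralytics 8.1-era solutions API; later releases reworked this interface.
+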
+ # Set model parameters
+ model.overrides['conf'] = 0.5  # NMS confidence threshold
+ model.overrides['iou'] = 0.5  # NMS IoU threshold
+ model.overrides['agnostic_nms'] = True  # NMS class-agnostic
+ model.overrides['max_det'] = 1000  # maximum number of detections per image
+
+ # Store scores for each person-luggage pair using tracker ID
+ ownership_scores = defaultdict(lambda: defaultdict(int))
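+ # ownership_scores[luggage_id][person_id] counts the sampled frames in which
+ # that person stood within unattended_threshold of that luggage; e.g. a value
+ # of 12 at [3][7] means person 7 was near luggage 3 in 12 processed frames.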
+
+
+ def calculate_distance(depth_map, point1, point2):
+     # Fuse the 2-D ground-plane distance with the depth difference from MiDaS
+     dist_2d_m, dist_2d_mm = dist_obj.calculate_distance(point1, point2)
+     z1 = depth_map[int(point1[1]), int(point1[0])] / pixels_per_meter
+     z2 = depth_map[int(point2[1]), int(point2[0])] / pixels_per_meter
+     depth_diff = np.abs(z1 - z2)
+     distance = np.sqrt(dist_2d_m ** 2 + depth_diff ** 2)
+     return distance
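+ # Example: a 2-D distance of 1.5 m with a depth difference of 2.0 m gives
+ # sqrt(1.5**2 + 2.0**2) = 2.5 m, exceeding the 2.0 m unattended_threshold.
+
+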
+ def process_video(video_source):
+     cap = cv2.VideoCapture(video_source)
+     if not cap.isOpened():
+         print("Error: Could not open video.")
+         return None
+
+     owners = {}  # Store assigned owners for luggage using tracker ID
+     abandoned_luggages = set()  # Store abandoned luggage using tracker ID
+
+     frame_count = 0
+     output_frames = []  # Store the processed frames to return as video
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         frame_count += 1
+         if frame_count % 10 != 0:
+             continue
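+
+         # Sampling note: only every 10th frame reaches the models (~3 analyzed
+         # frames per second for a 30 fps source); skipped frames are also not
+         # written to the output, so the result plays back time-compressed.
+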
+         # Track persons and luggage (COCO classes: 0 person, 24 backpack,
+         # 26 handbag, 28 suitcase)
+         results = model.track(frame, persist=True, classes=[0, 28, 24, 26], show=False)
+         frame_ = results[0].plot()
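+         # persist=True carries tracker state across successive track() calls,
+         # so box.id stays stable for the same object between frames.
+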
+         # MiDaS depth estimation
+         img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+         input_batch = midas_transforms(img).to(device)
+         with torch.no_grad():
+             prediction = midas(input_batch)
+             prediction = torch.nn.functional.interpolate(
+                 prediction.unsqueeze(1),
+                 size=img.shape[:2],
+                 mode="bicubic",
+                 align_corners=False,
+             ).squeeze()
+         depth_map = prediction.cpu().numpy()
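+         # The raw MiDaS output is lower resolution than the frame; bicubic
+         # upsampling lets depth_map be indexed with detection pixel coordinates.
+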
+         # Extract person and luggage centroids keyed by tracker ID
+         persons = []
+         luggages = []
+         for box in results[0].boxes:
+             # box.id is None until the tracker locks on; box.id/box.cls are
+             # tensors, so cast to int before using them as dict keys
+             if box.id is None:
+                 continue
+             track_id = int(box.id)
+             centroid = get_centroid(box)
+             if int(box.cls) == 0:
+                 persons.append((track_id, centroid))
+             elif int(box.cls) in [24, 26, 28]:
+                 luggages.append((track_id, centroid))
+
+         # Accumulate ownership evidence for each person-luggage pair in range
+         for person_id, person_centroid in persons:
+             for luggage_id, luggage_centroid in luggages:
+                 distance_m = calculate_distance(depth_map, person_centroid, luggage_centroid)
+                 if distance_m <= unattended_threshold and luggage_id not in abandoned_luggages:
+                     ownership_scores[luggage_id][person_id] += 1
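+
+         # NOTE: `owners` is declared above but never populated. A minimal,
+         # hypothetical way to assign owners from the accumulated evidence
+         # (illustrative only, not part of the original logic):
+         #
+         #     for luggage_id, scores in ownership_scores.items():
+         #         if luggage_id not in owners and scores:
+         #             owners[luggage_id] = max(scores, key=scores.get)
+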
+         # Check for abandoned luggage
+         for luggage_id, luggage_centroid in luggages:
+             person_in_range = any(
+                 calculate_distance(depth_map, person_centroid, luggage_centroid) <= unattended_threshold
+                 for person_id, person_centroid in persons
+             )
+             if not person_in_range and luggage_id not in abandoned_luggages:
+                 abandoned_luggages.add(luggage_id)
+
+         # Visualization
+         for box in results[0].boxes:
+             xyxy = box.xyxy[0].cpu().numpy().astype(int)
+             cv2.rectangle(frame_, (xyxy[0], xyxy[1]), (xyxy[2], xyxy[3]), (0, 255, 0), 2)
+             centroid = get_centroid(box)
+             cv2.circle(frame_, (int(centroid[0]), int(centroid[1])), 5, (0, 255, 0), -1)
+
+         output_frames.append(frame_)
+
+     cap.release()
+     return output_frames
+
+
+ def get_centroid(box):
+     return dist_obj.calculate_centroid(box.xyxy[0].cpu().numpy().astype(int))
+
+
+ def video_interface(video_path):
+     processed_frames = process_video(video_path)
+     if not processed_frames:
+         return None
+
+     # Save processed frames as a video
+     height, width, _ = processed_frames[0].shape
+     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
+     out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (width, height))
+
+     for frame in processed_frames:
+         out.write(frame)
+
+     out.release()
+
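+     # NOTE: 'mp4v' output often fails to play inline in browsers. If the
+     # Gradio video component stays blank, an H.264 fourcc is more widely
+     # supported (codec availability depends on the local OpenCV/FFmpeg build):
+     #
+     #     out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*'avc1'), 10, (width, height))
+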
+     # Provide both video playback and download
+     if os.path.getsize(temp_file.name) > 50 * 1024 * 1024:  # if larger than 50 MB, offer a download
+         return {"output": temp_file.name, "message": "The video is large. Click the link to download."}
+
+     return temp_file.name
+
+
+ # Create a Gradio interface
+ def gradio_interface(video_path):
+     result = video_interface(video_path)
+     if isinstance(result, dict):
+         return result['output'], result['message']
+     return result, None
+
+
+ interface = gr.Interface(
+     fn=gradio_interface,
+     inputs=gr.Video(format="mp4"),  # file upload is the default source
+     outputs=["video", "text"],
+     title="Abandoned Object Detection"
+ )
+
+ if __name__ == "__main__":
+     interface.queue(max_size=20).launch(
+         server_name="127.0.0.1",  # use "0.0.0.0" to accept connections from other machines
+         server_port=7860,         # default Gradio port
+         debug=True,               # enable debug mode
+         share=True                # also create a public shareable link for testing
+     )