HarshitJoshi committed
Commit 364a848 · verified · 1 Parent(s): c0fb974

Update app.py

Files changed (1)
  1. app.py +228 -93
app.py CHANGED
@@ -16,10 +16,10 @@ def download_file(url, save_name):
         open(save_name, 'wb').write(file.content)

 for i, url in enumerate(file_urls):
-    if 'mp4' in file_urls[i]:
-        download_file(file_urls[i], f"video.mp4")
+    if url.endswith('.mp4'):
+        download_file(url, "video.mp4")
     else:
-        download_file(file_urls[i], f"image_{i}.jpg")
+        download_file(url, f"image_{i}.jpg")

 colors = {
     0: (255, 0, 0),    # Red for class 0
@@ -33,8 +33,8 @@ colors = {
 }

 model = YOLO('modelbest.pt')
-path = [['image_0.jpg'], ['image_1.jpg']]
-video_path = [['video.mp4']]
+image_paths = [['image_0.jpg'], ['image_1.jpg']]
+video_paths = [['video.mp4']]

 def show_preds_image(image_path):
     image = cv2.imread(image_path)
@@ -62,49 +62,26 @@ def show_preds_image(image_path):

     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

-
-# def show_preds_image(image_path):
-#     image = cv2.imread(image_path)
-#     outputs = model.predict(source=image_path)
-#     results = outputs[0].cpu().numpy()
-#     for i, det in enumerate(results.boxes.xyxy):
-#         cv2.rectangle(
-#             image,
-#             (int(det[0]), int(det[1])),
-#             (int(det[2]), int(det[3])),
-#             color=(0, 0, 255),
-#             thickness=2,
-#             lineType=cv2.LINE_AA
-#         )
-#     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-inputs_image = [
-    gr.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
-    gr.Image(type="numpy", label="Output Image"),
-]
+inputs_image = gr.Image(type="filepath", label="Input Image")
+outputs_image = gr.Image(type="numpy", label="Output Image")

 interface_image = gr.Interface(
     fn=show_preds_image,
     inputs=inputs_image,
     outputs=outputs_image,
     title="Smoke Detection on Indian Roads",
-    examples=path,
+    examples=image_paths,
     cache_examples=False,
 )

 def show_preds_video(video_path):
-    # Open the input video
     cap = cv2.VideoCapture(video_path)

-    # Get video properties
     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
     fps = int(cap.get(cv2.CAP_PROP_FPS))

-    # Define the codec and create a VideoWriter object
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' for .mp4 format
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))

     while cap.isOpened():
@@ -120,45 +97,36 @@ def show_preds_video(video_path):
             class_id = int(results.boxes.cls[i])
             label = model.names[class_id]

-            # Get the bounding box coordinates
             x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])

-            # Draw the bounding box with the specified color
             color = colors.get(class_id, (0, 0, 255))
             cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)

-            # Calculate text size and position
             label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
             text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
             text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2

-            # Draw the label text
             cv2.putText(frame_copy, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)

-        # Write the frame to the output video
         out.write(frame_copy)

-    # Release everything
     cap.release()
     out.release()

     return 'output_video.mp4'

-# Updated Gradio interface
-inputs_video = [
-    gr.Video(format="mp4", label="Input Video"),
-]
-outputs_video = [
-    gr.Video(label="Output Video"),
-]
+inputs_video = gr.Video(format="mp4", label="Input Video")
+outputs_video = gr.Video(label="Output Video")
+
 interface_video = gr.Interface(
     fn=show_preds_video,
     inputs=inputs_video,
     outputs=outputs_video,
     title="Smoke Detection on Indian Roads",
-    examples=video_path,
+    examples=video_paths,
     cache_examples=False,
 )
+
 gr.TabbedInterface(
     [interface_image, interface_video],
     tab_names=['Image inference', 'Video inference']
@@ -169,32 +137,35 @@ gr.TabbedInterface(
 # import cv2
 # import requests
 # import os
-
 # from ultralytics import YOLO
+
 # file_urls = [
 #     'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0',
 #     'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0',
 #     'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=0'
 # ]

-
 # def download_file(url, save_name):
-#     url = url
 #     if not os.path.exists(save_name):
 #         file = requests.get(url)
 #         open(save_name, 'wb').write(file.content)
-
+
 # for i, url in enumerate(file_urls):
 #     if 'mp4' in file_urls[i]:
-#         download_file(
-#             file_urls[i],
-#             f"video.mp4"
-#         )
+#         download_file(file_urls[i], f"video.mp4")
 #     else:
-#         download_file(
-#             file_urls[i],
-#             f"image_{i}.jpg"
-#         )
+#         download_file(file_urls[i], f"image_{i}.jpg")
+
+# colors = {
+#     0: (255, 0, 0),    # Red for class 0
+#     1: (0, 128, 0),    # Green (dark) for class 1
+#     2: (0, 0, 255),    # Blue for class 2
+#     3: (255, 255, 0),  # Yellow for class 3
+#     4: (255, 0, 255),  # Magenta for class 4
+#     5: (0, 255, 255),  # Cyan for class 5
+#     6: (128, 0, 0),    # Maroon for class 6
+#     7: (0, 225, 0),    # Green for class 7
+# }

 # model = YOLO('modelbest.pt')
 # path = [['image_0.jpg'], ['image_1.jpg']]
@@ -204,70 +175,234 @@ gr.TabbedInterface(
 #     image = cv2.imread(image_path)
 #     outputs = model.predict(source=image_path)
 #     results = outputs[0].cpu().numpy()
+
 #     for i, det in enumerate(results.boxes.xyxy):
-#         cv2.rectangle(
-#             image,
-#             (int(det[0]), int(det[1])),
-#             (int(det[2]), int(det[3])),
-#             color=(0, 0, 255),
-#             thickness=2,
-#             lineType=cv2.LINE_AA
-#         )
+#         class_id = int(results.boxes.cls[i])
+#         label = model.names[class_id]
+
+#         # Get the bounding box coordinates
+#         x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
+
+#         # Draw the bounding box with the specified color
+#         color = colors.get(class_id, (0, 0, 255))
+#         cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)
+
+#         # Calculate text size and position
+#         label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
+#         text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
+#         text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2
+
+#         # Draw the label text
+#         cv2.putText(image, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)
+
 #     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
+
+
+# # def show_preds_image(image_path):
+# #     image = cv2.imread(image_path)
+# #     outputs = model.predict(source=image_path)
+# #     results = outputs[0].cpu().numpy()
+# #     for i, det in enumerate(results.boxes.xyxy):
+# #         cv2.rectangle(
+# #             image,
+# #             (int(det[0]), int(det[1])),
+# #             (int(det[2]), int(det[3])),
+# #             color=(0, 0, 255),
+# #             thickness=2,
+# #             lineType=cv2.LINE_AA
+# #         )
+# #     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
 # inputs_image = [
-#     gr.components.Image(type="filepath", label="Input Image"),
+#     gr.Image(type="filepath", label="Input Image"),
 # ]
 # outputs_image = [
-#     gr.components.Image(type="numpy", label="Output Image"),
+#     gr.Image(type="numpy", label="Output Image"),
 # ]

 # interface_image = gr.Interface(
 #     fn=show_preds_image,
 #     inputs=inputs_image,
 #     outputs=outputs_image,
-#     title="Pothole detector",
+#     title="Smoke Detection on Indian Roads",
 #     examples=path,
 #     cache_examples=False,
 # )

-
 # def show_preds_video(video_path):
+#     # Open the input video
 #     cap = cv2.VideoCapture(video_path)
-#     while(cap.isOpened()):
+
+#     # Get video properties
+#     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+#     fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+#     # Define the codec and create a VideoWriter object
+#     fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' for .mp4 format
+#     out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))
+
+#     while cap.isOpened():
 #         ret, frame = cap.read()
-#         if ret:
-#             frame_copy = frame.copy()
-#             outputs = model.predict(source=frame)
-#             results = outputs[0].cpu().numpy()
-#             for i, det in enumerate(results.boxes.xyxy):
-#                 cv2.rectangle(
-#                     frame_copy,
-#                     (int(det[0]), int(det[1])),
-#                     (int(det[2]), int(det[3])),
-#                     color=(0, 0, 255),
-#                     thickness=2,
-#                     lineType=cv2.LINE_AA
-#                 )
-#             yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-
+#         if not ret:
+#             break
+
+#         frame_copy = frame.copy()
+#         outputs = model.predict(source=frame)
+#         results = outputs[0].cpu().numpy()
+
+#         for i, det in enumerate(results.boxes.xyxy):
+#             class_id = int(results.boxes.cls[i])
+#             label = model.names[class_id]
+
+#             # Get the bounding box coordinates
+#             x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
+
+#             # Draw the bounding box with the specified color
+#             color = colors.get(class_id, (0, 0, 255))
+#             cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)
+
+#             # Calculate text size and position
+#             label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
+#             text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
+#             text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2
+
+#             # Draw the label text
+#             cv2.putText(frame_copy, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)
+
+#         # Write the frame to the output video
+#         out.write(frame_copy)
+
+#     # Release everything
+#     cap.release()
+#     out.release()
+
+#     return 'output_video.mp4'
+
+# # Updated Gradio interface
 # inputs_video = [
-#     gr.components.Video(type="filepath", label="Input Video"),
-
+#     gr.Video(format="mp4", label="Input Video"),
 # ]
 # outputs_video = [
-#     gr.components.Image(type="numpy", label="Output Image"),
+#     gr.Video(label="Output Video"),
 # ]
 # interface_video = gr.Interface(
 #     fn=show_preds_video,
 #     inputs=inputs_video,
 #     outputs=outputs_video,
-#     title="Pothole detector",
+#     title="Smoke Detection on Indian Roads",
 #     examples=video_path,
 #     cache_examples=False,
 # )
-
 # gr.TabbedInterface(
 #     [interface_image, interface_video],
 #     tab_names=['Image inference', 'Video inference']
-# ).queue().launch()
+# ).queue().launch()
+
+
+# # import gradio as gr
+# # import cv2
+# # import requests
+# # import os
+
+# # from ultralytics import YOLO
+# # file_urls = [
+# #     'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0',
+# #     'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0',
+# #     'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=0'
+# # ]
+
+
+# # def download_file(url, save_name):
+# #     url = url
+# #     if not os.path.exists(save_name):
+# #         file = requests.get(url)
+# #         open(save_name, 'wb').write(file.content)
+
+# # for i, url in enumerate(file_urls):
+# #     if 'mp4' in file_urls[i]:
+# #         download_file(
+# #             file_urls[i],
+# #             f"video.mp4"
+# #         )
+# #     else:
+# #         download_file(
+# #             file_urls[i],
+# #             f"image_{i}.jpg"
+# #         )
+
+# # model = YOLO('modelbest.pt')
+# # path = [['image_0.jpg'], ['image_1.jpg']]
+# # video_path = [['video.mp4']]
+
+# # def show_preds_image(image_path):
+# #     image = cv2.imread(image_path)
+# #     outputs = model.predict(source=image_path)
+# #     results = outputs[0].cpu().numpy()
+# #     for i, det in enumerate(results.boxes.xyxy):
+# #         cv2.rectangle(
+# #             image,
+# #             (int(det[0]), int(det[1])),
+# #             (int(det[2]), int(det[3])),
+# #             color=(0, 0, 255),
+# #             thickness=2,
+# #             lineType=cv2.LINE_AA
+# #         )
+# #     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+# # inputs_image = [
+# #     gr.components.Image(type="filepath", label="Input Image"),
+# # ]
+# # outputs_image = [
+# #     gr.components.Image(type="numpy", label="Output Image"),
+# # ]
+
+# # interface_image = gr.Interface(
+# #     fn=show_preds_image,
+# #     inputs=inputs_image,
+# #     outputs=outputs_image,
+# #     title="Pothole detector",
+# #     examples=path,
+# #     cache_examples=False,
+# # )
+
+
+# # def show_preds_video(video_path):
+# #     cap = cv2.VideoCapture(video_path)
+# #     while(cap.isOpened()):
+# #         ret, frame = cap.read()
+# #         if ret:
+# #             frame_copy = frame.copy()
+# #             outputs = model.predict(source=frame)
+# #             results = outputs[0].cpu().numpy()
+# #             for i, det in enumerate(results.boxes.xyxy):
+# #                 cv2.rectangle(
+# #                     frame_copy,
+# #                     (int(det[0]), int(det[1])),
+# #                     (int(det[2]), int(det[3])),
+# #                     color=(0, 0, 255),
+# #                     thickness=2,
+# #                     lineType=cv2.LINE_AA
+# #                 )
+# #             yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+
+# # inputs_video = [
+# #     gr.components.Video(type="filepath", label="Input Video"),
+
+# # ]
+# # outputs_video = [
+# #     gr.components.Image(type="numpy", label="Output Image"),
+# # ]
+# # interface_video = gr.Interface(
+# #     fn=show_preds_video,
+# #     inputs=inputs_video,
+# #     outputs=outputs_video,
+# #     title="Pothole detector",
+# #     examples=video_path,
+# #     cache_examples=False,
+# # )
+
+# # gr.TabbedInterface(
+# #     [interface_image, interface_video],
+# #     tab_names=['Image inference', 'Video inference']
+# #     ).queue().launch()
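
Note on the new download check: url.endswith('.mp4') tests the raw URL string, and the Dropbox links shown in the commented-out section end in a query string ("...&dl=0"). If the live file_urls entries (unchanged lines outside this diff) have the same form, which is an assumption here, the video link would fall through to the image branch. A minimal sketch of a query-string-safe check, using a hypothetical helper that is not part of this commit:

from urllib.parse import urlparse

def is_mp4(url):
    # Look only at the URL path, ignoring any query string such as "?rlkey=...&dl=0".
    return urlparse(url).path.lower().endswith('.mp4')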