HarshitJoshi committed (verified)
Commit cf9e5f0 · 1 Parent(s): 364a848

Update app.py

Files changed (1)
  1. app.py +5 -283
app.py CHANGED
@@ -4,6 +4,7 @@ import requests
 import os
 from ultralytics import YOLO

+# Downloading the necessary files
 file_urls = [
     'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0',
     'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0',
@@ -21,6 +22,7 @@ for i, url in enumerate(file_urls):
     else:
         download_file(url, f"image_{i}.jpg")

+# Define the colors for different classes
 colors = {
     0: (255, 0, 0), # Red for class 0
     1: (0, 128, 0), # Green (dark) for class 1
@@ -32,9 +34,8 @@ colors = {
     7: (0, 225, 0), # Green for class 7
 }

+# Load the YOLO model
 model = YOLO('modelbest.pt')
-image_paths = [['image_0.jpg'], ['image_1.jpg']]
-video_paths = [['video.mp4']]

 def show_preds_image(image_path):
     image = cv2.imread(image_path)
@@ -69,9 +70,7 @@ interface_image = gr.Interface(
     fn=show_preds_image,
     inputs=inputs_image,
     outputs=outputs_image,
-    title="Smoke Detection on Indian Roads",
-    examples=image_paths,
-    cache_examples=False,
+    title="Smoke Detection on Indian Roads"
 )

 def show_preds_video(video_path):
@@ -122,287 +121,10 @@ interface_video = gr.Interface(
     fn=show_preds_video,
     inputs=inputs_video,
     outputs=outputs_video,
-    title="Smoke Detection on Indian Roads",
-    examples=video_paths,
-    cache_examples=False,
+    title="Smoke Detection on Indian Roads"
 )

 gr.TabbedInterface(
     [interface_image, interface_video],
     tab_names=['Image inference', 'Video inference']
 ).queue().launch()
-
-
-# import gradio as gr
-# import cv2
-# import requests
-# import os
-# from ultralytics import YOLO
-
-# file_urls = [
-# 'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0',
-# 'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0',
-# 'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=0'
-# ]
-
-# def download_file(url, save_name):
-# if not os.path.exists(save_name):
-# file = requests.get(url)
-# open(save_name, 'wb').write(file.content)
-
-# for i, url in enumerate(file_urls):
-# if 'mp4' in file_urls[i]:
-# download_file(file_urls[i], f"video.mp4")
-# else:
-# download_file(file_urls[i], f"image_{i}.jpg")
-
-# colors = {
-# 0: (255, 0, 0), # Red for class 0
-# 1: (0, 128, 0), # Green (dark) for class 1
-# 2: (0, 0, 255), # Blue for class 2
-# 3: (255, 255, 0), # Yellow for class 3
-# 4: (255, 0, 255), # Magenta for class 4
-# 5: (0, 255, 255), # Cyan for class 5
-# 6: (128, 0, 0), # Maroon for class 6
-# 7: (0, 225, 0), # Green for class 7
-# }
-
-# model = YOLO('modelbest.pt')
-# path = [['image_0.jpg'], ['image_1.jpg']]
-# video_path = [['video.mp4']]
-
-# def show_preds_image(image_path):
-# image = cv2.imread(image_path)
-# outputs = model.predict(source=image_path)
-# results = outputs[0].cpu().numpy()
-
-# for i, det in enumerate(results.boxes.xyxy):
-# class_id = int(results.boxes.cls[i])
-# label = model.names[class_id]
-
-# # Get the bounding box coordinates
-# x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
-
-# # Draw the bounding box with the specified color
-# color = colors.get(class_id, (0, 0, 255))
-# cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)
-
-# # Calculate text size and position
-# label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
-# text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
-# text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2
-
-# # Draw the label text
-# cv2.putText(image, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)
-
-# return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-
-# # def show_preds_image(image_path):
-# # image = cv2.imread(image_path)
-# # outputs = model.predict(source=image_path)
-# # results = outputs[0].cpu().numpy()
-# # for i, det in enumerate(results.boxes.xyxy):
-# # cv2.rectangle(
-# # image,
-# # (int(det[0]), int(det[1])),
-# # (int(det[2]), int(det[3])),
-# # color=(0, 0, 255),
-# # thickness=2,
-# # lineType=cv2.LINE_AA
-# # )
-# # return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-# inputs_image = [
-# gr.Image(type="filepath", label="Input Image"),
-# ]
-# outputs_image = [
-# gr.Image(type="numpy", label="Output Image"),
-# ]
-
-# interface_image = gr.Interface(
-# fn=show_preds_image,
-# inputs=inputs_image,
-# outputs=outputs_image,
-# title="Smoke Detection on Indian Roads",
-# examples=path,
-# cache_examples=False,
-# )
-
-# def show_preds_video(video_path):
-# # Open the input video
-# cap = cv2.VideoCapture(video_path)
-
-# # Get video properties
-# width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-# height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-# fps = int(cap.get(cv2.CAP_PROP_FPS))
-
-# # Define the codec and create a VideoWriter object
-# fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 'mp4v' for .mp4 format
-# out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))
-
-# while cap.isOpened():
-# ret, frame = cap.read()
-# if not ret:
-# break
-
-# frame_copy = frame.copy()
-# outputs = model.predict(source=frame)
-# results = outputs[0].cpu().numpy()
-
-# for i, det in enumerate(results.boxes.xyxy):
-# class_id = int(results.boxes.cls[i])
-# label = model.names[class_id]
-
-# # Get the bounding box coordinates
-# x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
-
-# # Draw the bounding box with the specified color
-# color = colors.get(class_id, (0, 0, 255))
-# cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)
-
-# # Calculate text size and position
-# label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
-# text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
-# text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2
-
-# # Draw the label text
-# cv2.putText(frame_copy, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)
-
-# # Write the frame to the output video
-# out.write(frame_copy)
-
-# # Release everything
-# cap.release()
-# out.release()
-
-# return 'output_video.mp4'
-
-# # Updated Gradio interface
-# inputs_video = [
-# gr.Video(format="mp4", label="Input Video"),
-# ]
-# outputs_video = [
-# gr.Video(label="Output Video"),
-# ]
-# interface_video = gr.Interface(
-# fn=show_preds_video,
-# inputs=inputs_video,
-# outputs=outputs_video,
-# title="Smoke Detection on Indian Roads",
-# examples=video_path,
-# cache_examples=False,
-# )
-# gr.TabbedInterface(
-# [interface_image, interface_video],
-# tab_names=['Image inference', 'Video inference']
-# ).queue().launch()
-
-
-# # import gradio as gr
-# # import cv2
-# # import requests
-# # import os
-
-# # from ultralytics import YOLO
-# # file_urls = [
-# # 'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0',
-# # 'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0',
-# # 'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=0'
-# # ]
-
-
-# # def download_file(url, save_name):
-# # url = url
-# # if not os.path.exists(save_name):
-# # file = requests.get(url)
-# # open(save_name, 'wb').write(file.content)
-
-# # for i, url in enumerate(file_urls):
-# # if 'mp4' in file_urls[i]:
-# # download_file(
-# # file_urls[i],
-# # f"video.mp4"
-# # )
-# # else:
-# # download_file(
-# # file_urls[i],
-# # f"image_{i}.jpg"
-# # )
-
-# # model = YOLO('modelbest.pt')
-# # path = [['image_0.jpg'], ['image_1.jpg']]
-# # video_path = [['video.mp4']]
-
-# # def show_preds_image(image_path):
-# # image = cv2.imread(image_path)
-# # outputs = model.predict(source=image_path)
-# # results = outputs[0].cpu().numpy()
-# # for i, det in enumerate(results.boxes.xyxy):
-# # cv2.rectangle(
-# # image,
-# # (int(det[0]), int(det[1])),
-# # (int(det[2]), int(det[3])),
-# # color=(0, 0, 255),
-# # thickness=2,
-# # lineType=cv2.LINE_AA
-# # )
-# # return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-# # inputs_image = [
-# # gr.components.Image(type="filepath", label="Input Image"),
-# # ]
-# # outputs_image = [
-# # gr.components.Image(type="numpy", label="Output Image"),
-# # ]
-
-# # interface_image = gr.Interface(
-# # fn=show_preds_image,
-# # inputs=inputs_image,
-# # outputs=outputs_image,
-# # title="Pothole detector",
-# # examples=path,
-# # cache_examples=False,
-# # )
-
-
-# # def show_preds_video(video_path):
-# # cap = cv2.VideoCapture(video_path)
-# # while(cap.isOpened()):
-# # ret, frame = cap.read()
-# # if ret:
-# # frame_copy = frame.copy()
-# # outputs = model.predict(source=frame)
-# # results = outputs[0].cpu().numpy()
-# # for i, det in enumerate(results.boxes.xyxy):
-# # cv2.rectangle(
-# # frame_copy,
-# # (int(det[0]), int(det[1])),
-# # (int(det[2]), int(det[3])),
-# # color=(0, 0, 255),
-# # thickness=2,
-# # lineType=cv2.LINE_AA
-# # )
-# # yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-
-# # inputs_video = [
-# # gr.components.Video(type="filepath", label="Input Video"),
-
-# # ]
-# # outputs_video = [
-# # gr.components.Image(type="numpy", label="Output Image"),
-# # ]
-# # interface_video = gr.Interface(
-# # fn=show_preds_video,
-# # inputs=inputs_video,
-# # outputs=outputs_video,
-# # title="Pothole detector",
-# # examples=video_path,
-# # cache_examples=False,
-# # )
-
-# # gr.TabbedInterface(
-# # [interface_image, interface_video],
-# # tab_names=['Image inference', 'Video inference']
-# # ).queue().launch()
 
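For a quick local check of the weights outside the Gradio UI, the same model can be run directly on one of the downloaded files. A minimal sketch, assuming modelbest.pt and image_0.jpg are present in the working directory; it uses ultralytics' built-in plot() helper instead of the manual cv2 drawing in app.py:

# Illustrative local check (not part of this commit): run the same YOLO
# weights used by the Space on one downloaded image and save the result.
import cv2
from ultralytics import YOLO

model = YOLO('modelbest.pt')                 # same weights file as app.py
result = model.predict(source='image_0.jpg')[0]
annotated = result.plot()                    # BGR image with boxes and labels drawn
cv2.imwrite('annotated_image_0.jpg', annotated)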