# Hugging Face Space: Gradio demo for YOLO-based detection on road imagery.
import os

import cv2
import gradio as gr
import requests
from ultralytics import YOLO
# Example assets for the demo: two images and one video.
# NOTE: Dropbox share links must use `dl=1` for a direct file download;
# `dl=0` serves an HTML preview page, which would corrupt the saved files.
file_urls = [
    'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=1',
    'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=1',
    'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=1',
]
def download_file(url, save_name):
    """Download `url` to local path `save_name`, skipping if it already exists.

    Raises
    ------
    requests.HTTPError
        If the server responds with an error status.
    """
    if os.path.exists(save_name):
        return  # Already downloaded on a previous run.
    response = requests.get(url, timeout=60)
    response.raise_for_status()  # Fail loudly instead of saving an error page.
    # Context manager guarantees the file handle is closed (the original
    # open(...).write(...) leaked the handle).
    with open(save_name, 'wb') as f:
        f.write(response.content)
# Fetch the example assets once at startup.
for i, url in enumerate(file_urls):
    # Route by extension in the URL path; '.mp4' is less fragile than the
    # bare substring 'mp4', which could appear in a share-link hash.
    if '.mp4' in url:
        download_file(url, "video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")
# Per-class drawing colours. These tuples are passed straight to
# cv2.rectangle/cv2.putText, so they are in OpenCV's BGR channel order
# (the original comments named them as if they were RGB).
colors = {
    0: (255, 0, 0),    # blue
    1: (0, 128, 0),    # dark green
    2: (0, 0, 255),    # red
    3: (255, 255, 0),  # cyan
    4: (255, 0, 255),  # magenta
    5: (0, 255, 255),  # yellow
    6: (128, 0, 0),    # navy
    7: (0, 225, 0),    # green
}
# Detection weights; presumably checked into the Space alongside this script —
# TODO confirm 'modelbest.pt' is present in the repo.
model = YOLO('modelbest.pt')
# Gradio example inputs, matching the file names produced by the download loop.
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['video.mp4']]
def show_preds_image(image_path):
    """Run the YOLO model on one image and return an annotated RGB array.

    Parameters
    ----------
    image_path : str
        Path to the image on disk (supplied by the Gradio Image input).

    Returns
    -------
    numpy.ndarray
        The image in RGB order with a labelled bounding box per detection.

    Raises
    ------
    ValueError
        If the image cannot be read/decoded.
    """
    image = cv2.imread(image_path)
    if image is None:
        # imread returns None on failure; fail early with a clear message
        # instead of crashing later inside cvtColor.
        raise ValueError(f"Could not read image: {image_path}")
    # Predict on the already-decoded array rather than re-reading the file.
    results = model.predict(source=image)[0].cpu().numpy()
    # Iterate boxes and class ids in lockstep instead of indexing in parallel.
    for box, cls in zip(results.boxes.xyxy, results.boxes.cls):
        class_id = int(cls)
        label = model.names[class_id]
        x1, y1, x2, y2 = (int(v) for v in box[:4])
        # Fall back to red (BGR) for any class without a configured colour.
        color = colors.get(class_id, (0, 0, 255))
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)
        # Centre the label inside its bounding box.
        (text_w, text_h), _ = cv2.getTextSize(
            label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
        text_x = x1 + (x2 - x1) // 2 - text_w // 2
        text_y = y1 + (y2 - y1) // 2 + text_h // 2
        cv2.putText(image, label, (text_x, text_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)
    # OpenCV decodes to BGR; Gradio expects RGB.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# def show_preds_image(image_path): | |
# image = cv2.imread(image_path) | |
# outputs = model.predict(source=image_path) | |
# results = outputs[0].cpu().numpy() | |
# for i, det in enumerate(results.boxes.xyxy): | |
# cv2.rectangle( | |
# image, | |
# (int(det[0]), int(det[1])), | |
# (int(det[2]), int(det[3])), | |
# color=(0, 0, 255), | |
# thickness=2, | |
# lineType=cv2.LINE_AA | |
# ) | |
# return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) | |
# --- Image inference tab ---
inputs_image = [
    gr.Image(type="filepath", label="Input Image"),
]
outputs_image = [
    gr.Image(type="numpy", label="Output Image"),
]
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title="Smoke Detection on Indian Roads",
    examples=path,
    cache_examples=False,  # examples would otherwise run the model at build time
)
def show_preds_video(video_path):
    """Annotate every frame of a video with YOLO detections and save a copy.

    Parameters
    ----------
    video_path : str
        Path to the input video (supplied by the Gradio Video input).

    Returns
    -------
    str
        Path of the annotated output file, 'output_video.mp4'.

    Raises
    ------
    ValueError
        If the input video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back so the writer stays usable.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' codec for .mp4 output
    out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of stream
            annotated = frame.copy()
            # Predict on the unmodified frame; draw on the copy.
            results = model.predict(source=frame)[0].cpu().numpy()
            for box, cls in zip(results.boxes.xyxy, results.boxes.cls):
                class_id = int(cls)
                label = model.names[class_id]
                x1, y1, x2, y2 = (int(v) for v in box[:4])
                # Fall back to red (BGR) for unconfigured classes.
                color = colors.get(class_id, (0, 0, 255))
                cv2.rectangle(annotated, (x1, y1), (x2, y2),
                              color, 2, cv2.LINE_AA)
                # Centre the label inside its bounding box.
                (text_w, text_h), _ = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
                text_x = x1 + (x2 - x1) // 2 - text_w // 2
                text_y = y1 + (y2 - y1) // 2 + text_h // 2
                cv2.putText(annotated, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2,
                            cv2.LINE_AA)
            out.write(annotated)
    finally:
        # Release handles even if prediction/drawing raises mid-video.
        cap.release()
        out.release()
    return 'output_video.mp4'
# --- Video inference tab ---
inputs_video = [
    gr.Video(format="mp4", label="Input Video"),
]
outputs_video = [
    gr.Video(label="Output Video"),
]
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    # NOTE(review): title differs from the image tab ("Smoke Detection on
    # Indian Roads") — confirm which product name is intended.
    title="Pothole detector",
    examples=video_path,
    cache_examples=False,  # examples would otherwise run the model at build time
)
# Combine both interfaces into one tabbed app; queue() serializes requests
# so concurrent users don't contend for the single model instance.
gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image inference', 'Video inference'],
).queue().launch()
# import gradio as gr | |
# import cv2 | |
# import requests | |
# import os | |
# from ultralytics import YOLO | |
# file_urls = [ | |
# 'https://www.dropbox.com/scl/fi/kqd1z6wby1212c6ndodb3/Pol_20_jpg.rf.133c835b66958a7d48c12deeda31a719.jpg?rlkey=uqgvs2cwvahnmju15fv1zgorg&st=snv2yvtk&dl=0', | |
# 'https://www.dropbox.com/scl/fi/39aakapeh2y5ztk94rsyu/11e-a347-3f2d_jpg.rf.c66e5aeb57ee2ed660fdf0162156127d.jpg?rlkey=xoi3iw45vksgiejycau2ha7fh&st=etiawigv&dl=0', | |
# 'https://www.dropbox.com/scl/fi/8f08ehy53vsemw164g8n7/Recording2024-06-26184319.mp4?rlkey=pnmov906ttodl0cm92rpvc5ta&st=2twc9pjn&dl=0' | |
# ] | |
# def download_file(url, save_name): | |
# url = url | |
# if not os.path.exists(save_name): | |
# file = requests.get(url) | |
# open(save_name, 'wb').write(file.content) | |
# for i, url in enumerate(file_urls): | |
# if 'mp4' in file_urls[i]: | |
# download_file( | |
# file_urls[i], | |
# f"video.mp4" | |
# ) | |
# else: | |
# download_file( | |
# file_urls[i], | |
# f"image_{i}.jpg" | |
# ) | |
# model = YOLO('modelbest.pt') | |
# path = [['image_0.jpg'], ['image_1.jpg']] | |
# video_path = [['video.mp4']] | |
# def show_preds_image(image_path): | |
# image = cv2.imread(image_path) | |
# outputs = model.predict(source=image_path) | |
# results = outputs[0].cpu().numpy() | |
# for i, det in enumerate(results.boxes.xyxy): | |
# cv2.rectangle( | |
# image, | |
# (int(det[0]), int(det[1])), | |
# (int(det[2]), int(det[3])), | |
# color=(0, 0, 255), | |
# thickness=2, | |
# lineType=cv2.LINE_AA | |
# ) | |
# return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) | |
# inputs_image = [ | |
# gr.components.Image(type="filepath", label="Input Image"), | |
# ] | |
# outputs_image = [ | |
# gr.components.Image(type="numpy", label="Output Image"), | |
# ] | |
# interface_image = gr.Interface( | |
# fn=show_preds_image, | |
# inputs=inputs_image, | |
# outputs=outputs_image, | |
# title="Pothole detector", | |
# examples=path, | |
# cache_examples=False, | |
# ) | |
# def show_preds_video(video_path): | |
# cap = cv2.VideoCapture(video_path) | |
# while(cap.isOpened()): | |
# ret, frame = cap.read() | |
# if ret: | |
# frame_copy = frame.copy() | |
# outputs = model.predict(source=frame) | |
# results = outputs[0].cpu().numpy() | |
# for i, det in enumerate(results.boxes.xyxy): | |
# cv2.rectangle( | |
# frame_copy, | |
# (int(det[0]), int(det[1])), | |
# (int(det[2]), int(det[3])), | |
# color=(0, 0, 255), | |
# thickness=2, | |
# lineType=cv2.LINE_AA | |
# ) | |
# yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB) | |
# inputs_video = [ | |
# gr.components.Video(type="filepath", label="Input Video"), | |
# ] | |
# outputs_video = [ | |
# gr.components.Image(type="numpy", label="Output Image"), | |
# ] | |
# interface_video = gr.Interface( | |
# fn=show_preds_video, | |
# inputs=inputs_video, | |
# outputs=outputs_video, | |
# title="Pothole detector", | |
# examples=video_path, | |
# cache_examples=False, | |
# ) | |
# gr.TabbedInterface( | |
# [interface_image, interface_video], | |
# tab_names=['Image inference', 'Video inference'] | |
# ).queue().launch() |