import os
import sys

import cv2
import gradio as gr

# Make the bundled Yolov5_Deepsort package importable before importing from it.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from Yolov5_Deepsort.demo import app_main


def process_video(video):
    """Write a grayscale copy of the input video to output.mp4.

    Kept as a simple local processing example; the Gradio interface below
    uses app_main instead.
    """
    cap = cv2.VideoCapture(video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # cap.get(3) / cap.get(4) are the frame width and height.
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0,
                          (int(cap.get(3)), int(cap.get(4))))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Convert to grayscale, then back to BGR so the writer receives 3-channel frames.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        out.write(cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR))
    cap.release()
    out.release()
    return 'output.mp4'


title = "Welcome to DDM DeepSort"
description = "Upload a video to process it using DDM and DeepSORT."

with gr.Blocks() as demo:
    gr.Markdown("""
# Welcome to My Neuroscience Project

The author is a third-year undergraduate student at the School of Intelligent Science and Technology, Nanjing University, Suzhou Campus.

## Note

Since this project runs on Hugging Face's free CPU hardware, processing is very slow: in the worst case, even a video of only a dozen frames can take several minutes. If possible, it is therefore recommended to deploy the project on a machine with a capable GPU.

Although the YOLOv5 model can detect up to 80 object classes, this project focuses on autonomous driving, so objects other than people and cars are discarded after object detection.

**Tips for First-Time Users:**

- Ensure that the video includes at least people and cars.
- It's recommended that the video is not too long, ideally within 10 seconds.
""") with gr.Row(align="center"): gr.Video(value="DDMDeepsort1.mp4", label="Demo Video", width=720, height=435) gr.Interface( fn=app_main, inputs="video", outputs="video", title=title, description=description ) demo.launch()