import sys
import os

# Make the project root importable before loading the tracker entry point
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

import gradio as gr
import cv2
from Yolov5_Deepsort.demo import app_main


def process_video(video):
    """Example helper that re-encodes a video frame by frame.

    Not wired into the interface below, which calls app_main directly.
    """
    # Open the video file
    cap = cv2.VideoCapture(video)
    # Create a VideoWriter object to save the processed video (fixed 20 FPS output)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (width, height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Process the frame, e.g. convert it to grayscale
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Write the processed frame to the output video
        out.write(cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR))

    cap.release()
    out.release()
    return 'output.mp4'


# Define the title and description
title = "Welcome to DDM DeepSort"
description = "Upload a video to process it using DDM and DeepSORT."

# Initialize the Gradio interface with the title and description
with gr.Blocks() as demo:
    gr.Interface(
        fn=app_main,
        inputs="video",
        outputs="video",
        title=title,
        description=description
    )
    gr.Markdown("""
# Welcome to My Neuroscience Project

The author is a third-year undergraduate student at the School of Intelligent Science and Technology, Nanjing University, Suzhou Campus.

## Note

Since this project runs on Hugging Face's free CPU hardware, processing is very slow; in the worst case, even a video with only a dozen frames can take several minutes. If possible, deploy it on a machine with a decent GPU instead (a minimal device check is sketched after the tips below).

Although the YOLOv5 model supports up to 80 classes, this project focuses on autonomous driving, so detections of anything other than people and cars are discarded after object detection, roughly as in the sketch below.
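
This is illustrative only: the ids are the standard COCO class ids YOLOv5 uses for person (0) and car (2), and `keep_people_and_cars` is a hypothetical name, not a function from this repository.

```python
# Illustrative sketch, not the project's actual code.
# YOLOv5 (COCO) class ids: 0 = person, 2 = car.
KEPT_CLASS_IDS = {0, 2}

def keep_people_and_cars(detections):
    # detections: iterable of (x1, y1, x2, y2, conf, cls) rows from YOLOv5
    return [d for d in detections if int(d[5]) in KEPT_CLASS_IDS]
```
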
**Tips for First-Time Users:**

- Ensure that the video includes at least people and cars.
- It is recommended that the video is not too long, ideally within 10 seconds.
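
As noted above, a GPU deployment is strongly preferable to the free CPU tier. A minimal device check, assuming the standard PyTorch backend that YOLOv5 uses (illustrative only):

```python
import torch

# Prefer CUDA when available; otherwise fall back to the (slow) CPU path.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running inference on: {device}")
```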
""") gr.HTML("""
""") demo.launch()