Update app.py
app.py CHANGED
@@ -6,9 +6,7 @@ import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

def process_video(video):
-    # Open the video file
    cap = cv2.VideoCapture(video)
-    # Create a VideoWriter object to save the processed video
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))

@@ -16,21 +14,35 @@ def process_video(video):
        ret, frame = cap.read()
        if not ret:
            break
-        # Process the frame, e.g., convert it to grayscale
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-        # Write the processed frame to the output video
        out.write(cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR))

    cap.release()
    out.release()
    return 'output.mp4'

-# Define the title and description
title = "Welcome to DDM DeepSort"
description = "Upload a video to process it using DDM and DeepSORT."

-# Initialize Gradio interface with title and description
with gr.Blocks() as demo:
+    gr.Markdown("""
+    # Welcome to My Neuroscience Project
+    The author is a third-year undergraduate student at the School of Intelligent Science and Technology, Nanjing University, Suzhou Campus.
+    ## Note
+    <div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
+    Since this project uses Hugging Face's free CPU, the processing speed is very slow. In the worst case, even a video with a dozen frames can take several minutes to process. Therefore, if possible, it is recommended to deploy on a device with a better GPU.
+    Although the YOLOv5 model supports up to 80 classes, my project is primarily focused on autonomous driving. Therefore, objects other than people and cars will be excluded after object detection.
+    </div>
+    <div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
+    **Tips for First-Time Users:**
+    - Ensure that the video includes at least people and cars.
+    - It's recommended that the video is not too long, ideally within 10 seconds.
+    </div>
+    """)
+
+    with gr.Row(align="center"):
+        gr.Video(value="DDMDeepsort1.mp4", label="Demo Video", width=720, height=435)
+
    gr.Interface(
        fn=app_main,
        inputs="video",
@@ -38,26 +50,5 @@ with gr.Blocks() as demo:
        title=title,
        description=description
    )
-    gr.Markdown("""
-    # Welcome to My Neuroscience Project
-
-    The author is a third-year undergraduate student at the School of Intelligent Science and Technology, Nanjing University, Suzhou Campus.
-
-    ## Note
-    <div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
-    Since this project uses Hugging Face's free CPU, the processing speed is very slow. In the worst case, even a video with a dozen frames can take several minutes to process. Therefore, if possible, it is recommended to deploy on a device with a better GPU.
-
-    Although the YOLOv5 model supports up to 80 classes, my project is primarily focused on autonomous driving. Therefore, objects other than people and cars will be excluded after object detection.
-    </div>
-    <div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
-    **Tips for First-Time Users:**
-    - Ensure that the video includes at least people and cars.
-    - It's recommended that the video is not too long, ideally within 10 seconds.
-    </div>
-    """)
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            gr.Video(value="DDMDeepsort1.mp4", label="Demo Video", width=720, height=435)

demo.launch()
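For context, the loop touched by this commit reads frames with OpenCV, converts each one to grayscale, and writes it back as BGR so the mp4 writer receives three channels. Below is a minimal standalone sketch of that pattern, not the Space's exact file: the `while True:` header is outside the hunks shown above, so it is an assumption, and the named `CAP_PROP_FRAME_WIDTH`/`CAP_PROP_FRAME_HEIGHT` constants are simply the readable equivalents of the `cap.get(3)` / `cap.get(4)` calls in the diff.

```python
import cv2

def process_video(video_path: str, out_path: str = "output.mp4") -> str:
    # Open the input video and query its frame size
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # same value as cap.get(3)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # same value as cap.get(4)

    # Writer for the processed output (fixed 20 fps, as in the diff)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(out_path, fourcc, 20.0, (width, height))

    while True:  # assumed loop header; not shown in the hunks above
        ret, frame = cap.read()
        if not ret:
            break
        # Convert to grayscale, then back to BGR so VideoWriter gets 3 channels
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        out.write(cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR))

    cap.release()
    out.release()
    return out_path
```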
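The note added in this commit states that, although YOLOv5 can detect 80 COCO classes, the app keeps only people and cars. The filtering code itself is not part of this diff; a hypothetical sketch of such a filter over YOLOv5-style detections (rows of `[x1, y1, x2, y2, confidence, class_id]`, where COCO class 0 is person and 2 is car) might look like the following. The surviving boxes would then presumably be handed to the DeepSORT tracker.

```python
import numpy as np

# Hypothetical helper, not taken from the repo: keep only person (0) and
# car (2) detections from a YOLOv5-style array of shape (N, 6):
# [x1, y1, x2, y2, confidence, class_id]
KEEP_CLASSES = {0, 2}  # COCO indices: 0 = person, 2 = car

def filter_detections(detections: np.ndarray) -> np.ndarray:
    class_ids = detections[:, 5].astype(int)
    mask = np.isin(class_ids, list(KEEP_CLASSES))
    return detections[mask]
```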