Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Standard library
import os

# Third-party
import cv2
import numpy as np
import streamlit as st
from ultralytics import YOLO

# Load the YOLOv8 nano detection model once at import time so every
# processed video reuses the same weights.
model = YOLO('yolov8n.pt')  # Ensure you have the correct model file
|
9 |
+
|
10 |
+
def process_video(video_path):
    """Run YOLO person detection on a video, flag likely falls, and write
    an annotated copy to "output.mp4".

    A detection of class 0 ("person" in COCO-trained YOLO models — TODO
    confirm for the deployed weights) whose bounding box is wider than it
    is tall is labelled "Fall Detected" (red); otherwise "Person" (green).
    Each annotated frame is also shown as a live preview on the page.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path of the annotated output video ("output.mp4").

    Raises:
        ValueError: If the input video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Previously a failed open silently produced an empty/invalid
        # writer; fail loudly instead.
        raise ValueError(f"Could not open video: {video_path}")

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back to a sane default so the
    # VideoWriter is not created with an invalid frame rate.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Create a video writer to save the output.
    output_path = "output.mp4"
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # Reuse a single placeholder so the page shows a live preview instead
    # of appending one st.image per frame (which grows the page without
    # bound on long videos).
    preview = st.empty()

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            results = model(frame)
            for result in results:
                for bbox in result.boxes:
                    x1, y1, x2, y2 = map(int, bbox.xyxy[0])
                    cls = int(bbox.cls)

                    if cls == 0:  # Assuming class 0 is 'person'
                        w = x2 - x1
                        h = y2 - y1

                        # Heuristic: a person lying down yields a box
                        # wider than tall. NOTE(review): crude — verify
                        # it is adequate for the target footage.
                        if h < w:
                            color = (0, 0, 255)  # Red color for fall detected
                            label = "Fall Detected"
                        else:
                            color = (0, 255, 0)  # Green color for normal detection
                            label = "Person"

                        cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                        cv2.putText(frame, label, (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            out.write(frame)

            # Streamlit expects RGB; OpenCV frames are BGR.
            preview.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    finally:
        # Release handles even if inference fails mid-stream so the
        # partially written output file is finalized and the capture
        # device/file is not leaked.
        cap.release()
        out.release()

    return output_path
|
56 |
+
|
57 |
+
# Streamlit interface
st.title("Fall Detection App")
st.write("Upload a video or choose from the default videos to detect falls.")

# Video selection: bundled demo clips. "Video 1" is resolved against the
# working directory; the others are assumed to sit next to the app —
# TODO confirm video2.mp4 / video3.mp4 are actually deployed.
default_videos = {
    "Video 1": os.path.join(os.getcwd(), "fall_test_01.mp4"),
    "Video 2": "video2.mp4",
    "Video 3": "video3.mp4",
}

option = st.selectbox("Choose a video", list(default_videos.keys()))
uploaded_video = st.file_uploader("Or upload your own video", type=["mp4", "avi", "mov"])

if uploaded_video is not None:
    # basename() strips any client-supplied directory components, so the
    # upload can only be written inside the current working directory
    # (the raw name could otherwise contain path separators).
    video_path = os.path.basename(uploaded_video.name)
    with open(video_path, 'wb') as f:
        f.write(uploaded_video.getbuffer())
    st.success(f"Uploaded {uploaded_video.name}")
else:
    # No upload: fall back to the selected bundled clip.
    video_path = default_videos[option]

if st.button("Process Video"):
    # Annotate the video and play the result inline.
    output_video = process_video(video_path)
    st.video(output_video)
|