File size: 2,472 Bytes
1d9d691
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5f252ac
1d9d691
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10ecd89
1d9d691
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import streamlit as st
import os
import cv2
import numpy as np
from ultralytics import YOLO

# Load the YOLO detector once at module import so every processed frame
# reuses the same weights (yolov8n = the smallest "nano" checkpoint).
model = YOLO('yolov8n.pt')  # Ensure you have the correct model file

def process_video(video_path):
    """Run YOLO person detection on a video and annotate suspected falls.

    Each detection of class 0 ('person') whose bounding box is wider than
    it is tall is flagged as a fall (red box + "Fall Detected" label);
    otherwise it is drawn green as a normal upright person.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated video, written as "output.mp4" in the
        current working directory.

    Raises:
        IOError: If the input video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Fail fast with a clear message instead of creating a writer
        # from garbage (0x0, 0 fps) properties.
        raise IOError(f"Cannot open video: {video_path}")

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps; fall back to a sane default so the
    # output file is still playable.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Create a video writer to save the output
    output_path = os.path.join(os.getcwd(), "output.mp4")
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            results = model(frame)
            _annotate_detections(frame, results)
            out.write(frame)
    finally:
        # Release both handles even if inference raises mid-stream.
        cap.release()
        out.release()

    return output_path


def _annotate_detections(frame, results):
    """Draw person/fall boxes on *frame* in place from YOLO *results*."""
    for result in results:
        for bbox in result.boxes:
            if int(bbox.cls) != 0:  # Assuming class 0 is 'person'
                continue

            x1, y1, x2, y2 = map(int, bbox.xyxy[0])
            w = x2 - x1
            h = y2 - y1

            # Heuristic: a lying person has a wider-than-tall box.
            if h < w:
                color = (0, 0, 255)  # Red color for fall detected
                label = "Fall Detected"
            else:
                color = (0, 255, 0)  # Green color for normal detection
                label = "Person"

            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            cv2.putText(frame, label, (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

# Streamlit interface
st.title("Fall Detection App")
st.write("Upload a video or choose from the default videos to detect falls.")

# Video selection — all defaults resolved against the working directory
# (the original left "video3.mp4" as a bare relative path, inconsistent
# with the other two entries).
default_videos = {
    "Video 1": os.path.join(os.getcwd(), "fall_test_01.mp4"),
    "Video 2": os.path.join(os.getcwd(), "fall_test_02.mp4"),
    "Video 3": os.path.join(os.getcwd(), "video3.mp4"),
}

option = st.selectbox("Choose a video", list(default_videos.keys()))
uploaded_video = st.file_uploader("Or upload your own video", type=["mp4", "avi", "mov"])

if uploaded_video is not None:
    # basename() strips any client-supplied directory components so the
    # upload can never be written outside the working directory.
    video_path = os.path.basename(uploaded_video.name)
    with open(video_path, 'wb') as f:
        f.write(uploaded_video.getbuffer())
    st.success(f"Uploaded {uploaded_video.name}")
else:
    video_path = default_videos[option]

if st.button("Process Video"):
    if not os.path.exists(video_path):
        # Guard against missing default files instead of crashing in OpenCV.
        st.error(f"Video not found: {video_path}")
    else:
        output_video = process_video(video_path)
        st.video(output_video)