simran0608 committed
Commit 530a851 · verified · 1 Parent(s): 3363738

Upload 8 files
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  src/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
+ shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
aug_medium.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a2590ddc636558a6cf887857adc3cfda5b2c8501f378124a1a4cfb239004c4e
+ size 40507685
drowsiness-detected.mp3 ADDED
Binary file (64.3 kB)
drowsiness_detection.py ADDED
@@ -0,0 +1,248 @@
+ # PREP DEPENDENCIES
+ from scipy.spatial import distance as dist
+ from imutils import face_utils
+ from threading import Thread
+ import numpy as np
+ import cv2 as cv
+ import imutils
+ import dlib
+ import pygame  # Used for playing alarm sounds cross-platform
+ import argparse
+ import os
+
+ # --- INITIALIZE MODELS AND CONSTANTS ---
+
+ # Haar cascade classifier for face detection
+ haar_cascade_face_detector = "haarcascade_frontalface_default.xml"
+ face_detector = cv.CascadeClassifier(haar_cascade_face_detector)
+
+ # Dlib facial landmark detector
+ dlib_facial_landmark_predictor = "shape_predictor_68_face_landmarks.dat"
+ landmark_predictor = dlib.shape_predictor(dlib_facial_landmark_predictor)
+
+ # Important variables
+ font = cv.FONT_HERSHEY_SIMPLEX
+ # Eye Drowsiness Detection
+ EYE_ASPECT_RATIO_THRESHOLD = 0.25
+ EYE_CLOSED_THRESHOLD = 20
+ EYE_THRESH_COUNTER = 0
+ DROWSY_COUNTER = 0
+ drowsy_alert = False
+
+ # Mouth Yawn Detection
+ MOUTH_ASPECT_RATIO_THRESHOLD = 0.5
+ MOUTH_OPEN_THRESHOLD = 15
+ YAWN_THRESH_COUNTER = 0
+ YAWN_COUNTER = 0
+ yawn_alert = False
+
+ # Head Not Visible Detection
+ FACE_LOST_THRESHOLD = 25  # Consecutive frames the face must be lost to trigger an alert
+ FACE_LOST_COUNTER = 0
+ HEAD_DOWN_COUNTER = 0  # Total count of head-not-visible events
+ head_down_alert = False
+
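+ # Note: the *_THRESHOLD values above are consecutive-frame counts, not seconds.
+ # Assuming a typical ~30 FPS stream, EYE_CLOSED_THRESHOLD = 20 is roughly 0.7 s of
+ # closed eyes and MOUTH_OPEN_THRESHOLD = 15 roughly 0.5 s; tune per camera frame rate.
+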
+ # --- AUDIO SETUP (using Pygame) ---
+ pygame.mixer.init()
+ drowsiness_sound = pygame.mixer.Sound("drowsiness-detected.mp3")
+ yawn_sound = pygame.mixer.Sound("yawning-detected.mp3")
+ # head_down_sound = pygame.mixer.Sound("dependencies/audio/head-down-detected.mp3")
+
+ # --- CORE FUNCTIONS ---
+ def play_alarm(sound_to_play):
+     if not pygame.mixer.get_busy():
+         sound_to_play.play()
+
+ def generate_alert(final_eye_ratio, final_mouth_ratio):
+     global EYE_THRESH_COUNTER, YAWN_THRESH_COUNTER
+     global drowsy_alert, yawn_alert
+     global DROWSY_COUNTER, YAWN_COUNTER
+
+     # Drowsiness check
+     if final_eye_ratio < EYE_ASPECT_RATIO_THRESHOLD:
+         EYE_THRESH_COUNTER += 1
+         if EYE_THRESH_COUNTER >= EYE_CLOSED_THRESHOLD:
+             if not drowsy_alert:
+                 DROWSY_COUNTER += 1
+                 drowsy_alert = True
+                 Thread(target=play_alarm, args=(drowsiness_sound,)).start()
+     else:
+         EYE_THRESH_COUNTER = 0
+         drowsy_alert = False
+
+     # Yawn check
+     if final_mouth_ratio > MOUTH_ASPECT_RATIO_THRESHOLD:
+         YAWN_THRESH_COUNTER += 1
+         if YAWN_THRESH_COUNTER >= MOUTH_OPEN_THRESHOLD:
+             if not yawn_alert:
+                 YAWN_COUNTER += 1
+                 yawn_alert = True
+                 Thread(target=play_alarm, args=(yawn_sound,)).start()
+     else:
+         YAWN_THRESH_COUNTER = 0
+         yawn_alert = False
+
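+ # Each alert latches: DROWSY_COUNTER / YAWN_COUNTER increment once per continuous
+ # episode, and the alarm re-arms only after the ratio returns to the safe side of
+ # its threshold, so a long eye closure fires a single alert rather than one per frame.
+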
+ def detect_facial_landmarks(x, y, w, h, gray_frame):
+     face = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
+     face_landmarks = landmark_predictor(gray_frame, face)
+     face_landmarks = face_utils.shape_to_np(face_landmarks)
+     return face_landmarks
+
+ def eye_aspect_ratio(eye):
+     A = dist.euclidean(eye[1], eye[5])
+     B = dist.euclidean(eye[2], eye[4])
+     C = dist.euclidean(eye[0], eye[3])
+     ear = (A + B) / (2.0 * C)
+     return ear
+
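+ # eye_aspect_ratio implements the EAR of Soukupová and Čech (2016):
+ # EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||), i.e. the two vertical
+ # eyelid gaps over twice the eye width; it sits near ~0.3 for an open eye and
+ # falls toward 0 on closure, hence the 0.25 threshold above.
+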
+ def final_eye_aspect_ratio(shape):
+     (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
+     (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
+     left_eye = shape[lStart:lEnd]
+     right_eye = shape[rStart:rEnd]
+     left_ear = eye_aspect_ratio(left_eye)
+     right_ear = eye_aspect_ratio(right_eye)
+     final_ear = (left_ear + right_ear) / 2.0
+     return final_ear, left_eye, right_eye
+
+ def mouth_aspect_ratio(mouth):
+     A = dist.euclidean(mouth[2], mouth[10])
+     B = dist.euclidean(mouth[4], mouth[8])
+     C = dist.euclidean(mouth[0], mouth[6])
+     mar = (A + B) / (2.0 * C)
+     return mar
+
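+ # The "mouth" slice spans dlib landmarks 48-67, so the MAR above mirrors the EAR
+ # construction: two vertical lip distances (landmark pairs 50/58 and 52/56) over
+ # twice the corner-to-corner mouth width (landmarks 48/54).
+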
+ def final_mouth_aspect_ratio(shape):
+     (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
+     mouth = shape[mStart:mEnd]
+     return mouth_aspect_ratio(mouth), mouth
+
+ def head_pose_ratio(shape):
+     nose_tip = shape[30]
+     chin_tip = shape[8]
+     left_face_corner = shape[0]
+     right_face_corner = shape[16]
+     nose_to_chin_dist = dist.euclidean(nose_tip, chin_tip)
+     face_width = dist.euclidean(left_face_corner, right_face_corner)
+     if face_width == 0:
+         return 0.0
+     hpr = nose_to_chin_dist / face_width
+     return hpr
+
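+ # Note: head_pose_ratio is currently unused; process_frame below infers a
+ # head-down event from the Haar detector losing the face for FACE_LOST_THRESHOLD
+ # consecutive frames rather than from this nose-to-chin / face-width ratio.
+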
+ def reset_counters():
+     global EYE_THRESH_COUNTER, YAWN_THRESH_COUNTER, FACE_LOST_COUNTER
+     global DROWSY_COUNTER, YAWN_COUNTER, HEAD_DOWN_COUNTER
+     global drowsy_alert, yawn_alert, head_down_alert
+     EYE_THRESH_COUNTER, YAWN_THRESH_COUNTER, FACE_LOST_COUNTER = 0, 0, 0
+     DROWSY_COUNTER, YAWN_COUNTER, HEAD_DOWN_COUNTER = 0, 0, 0
+     drowsy_alert, yawn_alert, head_down_alert = False, False, False
+
+ def process_frame(frame):
+     global FACE_LOST_COUNTER, head_down_alert, HEAD_DOWN_COUNTER
+     frame = imutils.resize(frame, width=640)
+     gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
+     faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
+     if len(faces) > 0:
+         FACE_LOST_COUNTER = 0
+         head_down_alert = False
+         (x, y, w, h) = faces[0]
+         face_landmarks = detect_facial_landmarks(x, y, w, h, gray_frame)
+         final_ear, left_eye, right_eye = final_eye_aspect_ratio(face_landmarks)
+         final_mar, mouth = final_mouth_aspect_ratio(face_landmarks)
+         # left_eye_hull, right_eye_hull, mouth_hull = cv.convexHull(left_eye), cv.convexHull(right_eye), cv.convexHull(mouth)
+         # cv.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
+         # cv.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
+         # cv.drawContours(frame, [mouth_hull], -1, (0, 255, 0), 1)
+         generate_alert(final_ear, final_mar)
+         cv.putText(frame, f"EAR: {final_ear:.2f}", (10, 30), font, 0.7, (0, 0, 255), 2)
+         cv.putText(frame, f"MAR: {final_mar:.2f}", (10, 60), font, 0.7, (0, 0, 255), 2)
+     else:
+         FACE_LOST_COUNTER += 1
+         if FACE_LOST_COUNTER >= FACE_LOST_THRESHOLD and not head_down_alert:
+             HEAD_DOWN_COUNTER += 1
+             head_down_alert = True
+     cv.putText(frame, f"Drowsy: {DROWSY_COUNTER}", (480, 30), font, 0.7, (255, 255, 0), 2)
+     cv.putText(frame, f"Yawn: {YAWN_COUNTER}", (480, 60), font, 0.7, (255, 255, 0), 2)
+     cv.putText(frame, f"Head Down: {HEAD_DOWN_COUNTER}", (480, 90), font, 0.7, (255, 255, 0), 2)
+     if drowsy_alert: cv.putText(frame, "DROWSINESS ALERT!", (150, 30), font, 0.9, (0, 0, 255), 2)
+     if yawn_alert: cv.putText(frame, "YAWN ALERT!", (200, 60), font, 0.9, (0, 0, 255), 2)
+     if head_down_alert: cv.putText(frame, "HEAD NOT VISIBLE!", (180, 90), font, 0.9, (0, 0, 255), 2)
+     return frame
+
+ def process_video(input_path, output_path=None):
+     reset_counters()
+     video_stream = cv.VideoCapture(input_path)
+     if not video_stream.isOpened():
+         print(f"Error: Could not open video file {input_path}")
+         return False
+
+     fps = int(video_stream.get(cv.CAP_PROP_FPS))
+     width = int(video_stream.get(cv.CAP_PROP_FRAME_WIDTH))
+     height = int(video_stream.get(cv.CAP_PROP_FRAME_HEIGHT))
+
+     print(f"Processing video: {input_path}")
+     print(f"Original Res: {width}x{height}, FPS: {fps}")
+
+     video_writer = None
+     if output_path:
+         fourcc = cv.VideoWriter_fourcc(*'mp4v')
+         # --- FIX: Calculate correct output dimensions to prevent corruption ---
+         # The process_frame function resizes frames to a fixed width of 640.
+         output_width = 640
+         # Maintain aspect ratio
+         output_height = int(height * (output_width / float(width)))
+         output_dims = (output_width, output_height)
+         video_writer = cv.VideoWriter(output_path, fourcc, fps, output_dims)
+         print(f"Outputting video with Res: {output_dims[0]}x{output_dims[1]}")
+
+     while True:
+         ret, frame = video_stream.read()
+         if not ret: break
+
+         processed_frame = process_frame(frame)
+         if video_writer: video_writer.write(processed_frame)
+
+     video_stream.release()
+     if video_writer: video_writer.release()
+
+     print("Video processing complete!")
+     print(f"Final Stats - Drowsy: {DROWSY_COUNTER}, Yawn: {YAWN_COUNTER}, Head Down: {HEAD_DOWN_COUNTER}")
+     return True
+
+ def run_webcam():
+     reset_counters()
+     video_stream = cv.VideoCapture(0)
+     if not video_stream.isOpened():
+         print("Error: Could not open webcam")
+         return False
+     while True:
+         ret, frame = video_stream.read()
+         if not ret:
+             print("Failed to grab frame")
+             break
+         processed_frame = process_frame(frame)
+         cv.imshow("Live Drowsiness and Yawn Detection", processed_frame)
+         if cv.waitKey(1) & 0xFF == ord('q'): break
+     video_stream.release()
+     cv.destroyAllWindows()
+     return True
+
+ # --- MAIN EXECUTION LOOP ---
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description='Drowsiness Detection System')
+     parser.add_argument('--mode', choices=['webcam', 'video'], default='webcam', help='Mode of operation')
+     parser.add_argument('--input', type=str, help='Input video file path for video mode')
+     parser.add_argument('--output', type=str, help='Output video file path for video mode')
+     args = parser.parse_args()
+
+     if args.mode == 'webcam':
+         print("Starting webcam detection...")
+         run_webcam()
+     elif args.mode == 'video':
+         if not args.input:
+             print("Error: --input argument is required for video mode.")
+         elif not os.path.exists(args.input):
+             print(f"Error: Input file not found at {args.input}")
+         else:
+             process_video(args.input, args.output)
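+
+ # Example invocations (paths are illustrative):
+ #   python3 drowsiness_detection.py --mode webcam
+ #   python3 drowsiness_detection.py --mode video --input dashcam.mp4 --output annotated.mp4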
haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render.
shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
+ size 99693937
streamlit_app.py ADDED
@@ -0,0 +1,318 @@
+ import asyncio
+ import sys
+
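+ # Assumed intent of the block below: pin the default asyncio event-loop policy on
+ # Linux (Python 3.8+) so Streamlit's script runner does not trip over libraries
+ # that install a different loop policy at import time.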
+ if sys.platform.startswith('linux') and sys.version_info >= (3, 8):
+     try:
+         asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
+     except Exception:
+         pass
+ import streamlit as st
+ from PIL import Image
+ import numpy as np
+ import subprocess
+ import time
+ import tempfile
+ import os
+ from ultralytics import YOLO
+ import cv2 as cv
+ import pandas as pd
+
+ model_path = "best.pt"
+
+ # --- Page Configuration ---
+ st.set_page_config(
+     page_title="Driver Distraction System",
+     page_icon="🚗",
+     layout="wide",
+     initial_sidebar_state="expanded",
+ )
+
+ # --- Sidebar ---
+ st.sidebar.title("🚗 Driver Distraction System")
+ st.sidebar.write("Choose an option below:")
+
+ # Sidebar navigation
+ page = st.sidebar.radio("Select Feature", [
+     "Distraction System",
+     "Real-time Drowsiness Detection",
+     "Video Drowsiness Detection"
+ ])
+
+ # --- Class Labels (for YOLO model) ---
+ class_names = ['drinking', 'hair and makeup', 'operating the radio', 'reaching behind',
+                'safe driving', 'talking on the phone', 'talking to passenger', 'texting']
+
+ # Sidebar Class Name Display
+ st.sidebar.subheader("Class Names")
+ for idx, class_name in enumerate(class_names):
+     st.sidebar.write(f"{idx}: {class_name}")
+
+ # --- Feature: YOLO Distraction Detection ---
+ if page == "Distraction System":
+     st.title("Driver Distraction System")
+     st.write("Upload an image or video to detect distractions using a YOLO model.")
+
+     # File type selection
+     file_type = st.radio("Select file type:", ["Image", "Video"])
+
+     if file_type == "Image":
+         uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
+         if uploaded_file is not None:
+             image = Image.open(uploaded_file).convert('RGB')
+             image_np = np.array(image)
+             col1, col2 = st.columns([1, 1])
+             with col1:
+                 st.subheader("Uploaded Image")
+                 st.image(image, caption="Original Image", use_container_width=True)
+             with col2:
+                 st.subheader("Detection Results")
+                 model = YOLO(model_path)
+                 start_time = time.time()
+                 results = model(image_np)
+                 end_time = time.time()
+                 prediction_time = end_time - start_time
+                 result = results[0]
+                 if len(result.boxes) > 0:
+                     boxes = result.boxes
+                     confidences = boxes.conf.cpu().numpy()
+                     classes = boxes.cls.cpu().numpy()
+                     class_names_dict = result.names
+                     max_conf_idx = confidences.argmax()
+                     predicted_class = class_names_dict[int(classes[max_conf_idx])]
+                     confidence_score = confidences[max_conf_idx]
+                     st.markdown(f"### Predicted Class: **{predicted_class}**")
+                     st.markdown(f"### Confidence Score: **{confidence_score:.4f}** ({confidence_score*100:.1f}%)")
+                     st.markdown(f"Inference Time: {prediction_time:.2f} seconds")
+                 else:
+                     st.warning("No distractions detected.")
+
+     else:  # Video processing
+         uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mov", "mkv", "webm"])
+
+         if uploaded_video is not None:
+             tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
+             tfile.write(uploaded_video.read())
+             temp_input_path = tfile.name
+             temp_output_path = tempfile.mktemp(suffix="_distraction_detected.mp4")
+
+             st.subheader("Video Information")
+             cap = cv.VideoCapture(temp_input_path)
+             fps = cap.get(cv.CAP_PROP_FPS)
+             width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+             height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+             total_frames = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
+             duration = total_frames / fps if fps > 0 else 0
+             cap.release()
+
+             col1, col2 = st.columns(2)
+             with col1:
+                 st.metric("Duration", f"{duration:.2f} seconds")
+                 st.metric("Original FPS", f"{fps:.2f}")
+             with col2:
+                 st.metric("Resolution", f"{width}x{height}")
+                 st.metric("Total Frames", total_frames)
+
+             st.subheader("Original Video Preview")
+             st.video(uploaded_video)
+
+             if st.button("Process Video for Distraction Detection"):
+                 TARGET_PROCESSING_FPS = 10
+                 # Hyperparameter for the temporal smoothing logic
+                 PERSISTENCE_CONFIDENCE_THRESHOLD = 0.40  # Stick with the previous class if it reappears with >= 40% confidence
+
+                 st.info(f"🚀 For faster results, video will be processed at ~{TARGET_PROCESSING_FPS} FPS.")
+                 st.info(f"🧠 Applying temporal smoothing to reduce status flickering (Persistence Threshold: {PERSISTENCE_CONFIDENCE_THRESHOLD*100:.0f}%).")
+
+                 progress_bar = st.progress(0, text="Starting video processing...")
+
+                 with st.spinner("Processing video... This may take a while."):
+                     model = YOLO(model_path)
+                     cap = cv.VideoCapture(temp_input_path)
+
+                     fourcc = cv.VideoWriter_fourcc(*'mp4v')
+                     out = cv.VideoWriter(temp_output_path, fourcc, fps, (width, height))
+
+                     frame_skip_interval = max(1, round(fps / TARGET_PROCESSING_FPS))
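+                     # e.g. a 30 FPS source gives max(1, round(30 / 10)) = 3, so detection
+                     # runs on every 3rd frame and annotations persist in between.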
+
+                     frame_count = 0
+                     last_best_box_coords = None
+                     last_best_box_label = ""
+                     last_status_text = "Status: Initializing..."
+                     last_status_color = (128, 128, 128)
+                     # State variable that stores the last confirmed class for smoothing
+                     last_confirmed_class_name = 'safe driving'
+
+                     while cap.isOpened():
+                         ret, frame = cap.read()
+                         if not ret:
+                             break
+
+                         frame_count += 1
+                         progress = int((frame_count / total_frames) * 100) if total_frames > 0 else 0
+                         progress_bar.progress(progress, text=f"Analyzing frame {frame_count}/{total_frames}")
+
+                         annotated_frame = frame.copy()
+
+                         if frame_count % frame_skip_interval == 0:
+                             results = model(annotated_frame)
+                             result = results[0]
+
+                             last_best_box_coords = None  # Reset box for this processing cycle
+
+                             if len(result.boxes) > 0:
+                                 boxes = result.boxes
+                                 class_names_dict = result.names
+                                 confidences = boxes.conf.cpu().numpy()
+                                 classes = boxes.cls.cpu().numpy()
+
+                                 # --- STABILITY LOGIC ---
+                                 final_box_to_use = None
+
+                                 # 1. Prefer the last confirmed class if it reappears with reasonable confidence
+                                 for i in range(len(boxes)):
+                                     current_class_name = class_names_dict[int(classes[i])]
+                                     if current_class_name == last_confirmed_class_name and confidences[i] >= PERSISTENCE_CONFIDENCE_THRESHOLD:
+                                         final_box_to_use = boxes[i]
+                                         break
+
+                                 # 2. Otherwise fall back to the highest-confidence detection in this frame
+                                 if final_box_to_use is None:
+                                     max_conf_idx = confidences.argmax()
+                                     final_box_to_use = boxes[max_conf_idx]
+
+                                 # Process the chosen box
+                                 x1, y1, x2, y2 = final_box_to_use.xyxy[0].cpu().numpy()
+                                 confidence = final_box_to_use.conf[0].cpu().numpy()
+                                 class_id = int(final_box_to_use.cls[0].cpu().numpy())
+                                 class_name = class_names_dict[class_id]
+
+                                 # Update the state for the next frames
+                                 last_confirmed_class_name = class_name
+                                 last_best_box_coords = (int(x1), int(y1), int(x2), int(y2))
+                                 last_best_box_label = f"{class_name}: {confidence:.2f}"
+
+                                 if class_name != 'safe driving':
+                                     last_status_text = f"Status: {class_name.replace('_', ' ').title()}"
+                                     last_status_color = (0, 0, 255)
+                                 else:
+                                     last_status_text = "Status: Safe Driving"
+                                     last_status_color = (0, 128, 0)
+                             else:
+                                 # No detections, reset to safe driving
+                                 last_confirmed_class_name = 'safe driving'
+                                 last_status_text = "Status: Safe Driving"
+                                 last_status_color = (0, 128, 0)
+
+                         # Draw annotations on EVERY frame using the last known data
+                         if last_best_box_coords:
+                             cv.rectangle(annotated_frame, (last_best_box_coords[0], last_best_box_coords[1]),
+                                          (last_best_box_coords[2], last_best_box_coords[3]), (0, 255, 0), 2)
+                             cv.putText(annotated_frame, last_best_box_label,
+                                        (last_best_box_coords[0], last_best_box_coords[1] - 10),
+                                        cv.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+                         # Draw status text on a filled background
+                         font_scale, font_thickness = 1.0, 2
+                         (text_w, text_h), _ = cv.getTextSize(last_status_text, cv.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
+                         padding = 10
+                         rect_start = (padding, padding)
+                         rect_end = (padding + text_w + padding, padding + text_h + padding)
+                         cv.rectangle(annotated_frame, rect_start, rect_end, last_status_color, -1)
+                         text_pos = (padding + 5, padding + text_h + 5)
+                         cv.putText(annotated_frame, last_status_text, text_pos, cv.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), font_thickness)
+
+                         out.write(annotated_frame)
+
+                     cap.release()
+                     out.release()
+                     progress_bar.progress(100, text="Video processing completed!")
+
+                 st.success("Video processed successfully!")
+
+                 if os.path.exists(temp_output_path):
+                     with open(temp_output_path, "rb") as file:
+                         video_bytes = file.read()
+
+                     st.download_button(
+                         label="📥 Download Processed Video",
+                         data=video_bytes,
+                         file_name=f"distraction_detected_{uploaded_video.name}",
+                         mime="video/mp4",
+                         key="download_distraction_video"
+                     )
+
+                     st.subheader("Sample Frame from Processed Video")
+                     cap_out = cv.VideoCapture(temp_output_path)
+                     ret, frame = cap_out.read()
+                     if ret:
+                         frame_rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
+                         st.image(frame_rgb, caption="Sample frame with distraction detection", use_container_width=True)
+                     cap_out.release()
+
+                 try:
+                     os.unlink(temp_input_path)
+                     if os.path.exists(temp_output_path): os.unlink(temp_output_path)
+                 except Exception as e:
+                     st.warning(f"Failed to clean up temporary files: {e}")
+
+ # --- Feature: Real-time Drowsiness Detection ---
+ elif page == "Real-time Drowsiness Detection":
+     st.title("🧠 Real-time Drowsiness Detection")
+     st.write("This will open your webcam and run the detection script.")
+     if st.button("Start Drowsiness Detection"):
+         with st.spinner("Launching webcam..."):
+             subprocess.Popen(["python3", "drowsiness_detection.py", "--mode", "webcam"])
+         st.success("Drowsiness detection started in a separate window. Press 'q' in that window to quit.")
+
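+ # Note: this launches drowsiness_detection.py as a separate process with its own
+ # OpenCV window, so it assumes Streamlit runs locally on a machine with a display.
+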
+ # --- Feature: Video Drowsiness Detection ---
+ elif page == "Video Drowsiness Detection":
+     st.title("📹 Video Drowsiness Detection")
+     st.write("Upload a video file to detect drowsiness and download the processed video.")
+     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mov", "mkv", "webm"])
+     if uploaded_video is not None:
+         tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
+         tfile.write(uploaded_video.read())
+         temp_input_path = tfile.name
+         temp_output_path = tempfile.mktemp(suffix="_processed.mp4")
+         st.subheader("Original Video Preview")
+         st.video(uploaded_video)
+         if st.button("Process Video for Drowsiness Detection"):
+             progress_bar = st.progress(0, text="Preparing to process video...")
+             with st.spinner("Processing video... This may take a while."):
+                 process = subprocess.Popen([
+                     "python3", "drowsiness_detection.py",
+                     "--mode", "video",
+                     "--input", temp_input_path,
+                     "--output", temp_output_path
+                 ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+                 stdout, stderr = process.communicate()
+             if process.returncode == 0:
+                 progress_bar.progress(100, text="Video processing completed!")
+                 if os.path.exists(temp_output_path):
+                     st.success("Video processed successfully!")
+                     if stdout: st.code(stdout)
+                     with open(temp_output_path, "rb") as file: video_bytes = file.read()
+                     st.download_button(
+                         label="📥 Download Processed Video",
+                         data=video_bytes,
+                         file_name=f"drowsiness_detected_{uploaded_video.name}",
+                         mime="video/mp4",
+                         key="download_processed_video"
+                     )
+                     st.subheader("Sample Frame from Processed Video")
+                     cap = cv.VideoCapture(temp_output_path)
+                     ret, frame = cap.read()
+                     if ret: st.image(cv.cvtColor(frame, cv.COLOR_BGR2RGB), caption="Sample frame with drowsiness detection", use_container_width=True)
+                     cap.release()
+                 else:
+                     st.error("Error: Processed video file not found.")
+                     if stderr: st.code(stderr)
+             else:
+                 st.error("An error occurred during video processing.")
+                 if stderr: st.code(stderr)
+             try:
+                 if os.path.exists(temp_input_path): os.unlink(temp_input_path)
+                 if os.path.exists(temp_output_path): os.unlink(temp_output_path)
+             except Exception as e:
+                 st.warning(f"Failed to clean up temporary files: {e}")
video_processor.py ADDED
@@ -0,0 +1,142 @@
+ """
+ Video Processing Utility for Drowsiness Detection
+ This script provides a more robust video-processing interface.
+ """
+
+ import cv2 as cv
+ import os
+ import json
+ from datetime import datetime
+ import argparse
+
+ def get_video_info(video_path):
+     """Get detailed video information"""
+     cap = cv.VideoCapture(video_path)
+
+     if not cap.isOpened():
+         return None
+
+     info = {
+         'fps': cap.get(cv.CAP_PROP_FPS),
+         'width': int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
+         'height': int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)),
+         'total_frames': int(cap.get(cv.CAP_PROP_FRAME_COUNT)),
+         'duration': cap.get(cv.CAP_PROP_FRAME_COUNT) / cap.get(cv.CAP_PROP_FPS) if cap.get(cv.CAP_PROP_FPS) > 0 else 0,
+         'codec': int(cap.get(cv.CAP_PROP_FOURCC)),
+         'file_size': os.path.getsize(video_path)
+     }
+
+     cap.release()
+     return info
+
+ def create_processing_report(input_path, output_path, stats):
+     """Create a JSON report of the processing results"""
+     report = {
+         'timestamp': datetime.now().isoformat(),
+         'input_file': input_path,
+         'output_file': output_path,
+         'video_info': get_video_info(input_path),
+         'detection_stats': stats,
+         'processing_info': {
+             'software': 'Drowsiness Detection System',
+             'version': '1.0'
+         }
+     }
+
+     report_path = output_path.replace('.mp4', '_report.json')
+     with open(report_path, 'w') as f:
+         json.dump(report, f, indent=2)
+
+     return report_path
+
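+ # The report lands next to the output video, e.g. "annotated.mp4" produces
+ # "annotated_report.json" via the .replace() call above.
+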
+ def process_video_with_progress(input_path, output_path, progress_callback=None):
+     """
+     Process video with progress callback
+     progress_callback: function that takes (current_frame, total_frames)
+     """
+     # Import the module itself so the counter globals are read *after* processing;
+     # `from drowsiness_detection import DROWSY_COUNTER` would capture the initial
+     # (zero) integer values and the final stats would always report 0.
+     import drowsiness_detection
+     from drowsiness_detection import process_frame, reset_counters
+
+     reset_counters()
+
+     # Open video file
+     video_stream = cv.VideoCapture(input_path)
+
+     if not video_stream.isOpened():
+         raise ValueError(f"Could not open video file {input_path}")
+
+     # Get video properties
+     fps = int(video_stream.get(cv.CAP_PROP_FPS))
+     width = int(video_stream.get(cv.CAP_PROP_FRAME_WIDTH))
+     height = int(video_stream.get(cv.CAP_PROP_FRAME_HEIGHT))
+     total_frames = int(video_stream.get(cv.CAP_PROP_FRAME_COUNT))
+
+     # Setup video writer; process_frame resizes to width 640 keeping aspect ratio,
+     # so match its output dimensions instead of a hard-coded 640x480
+     fourcc = cv.VideoWriter_fourcc(*'mp4v')
+     output_height = int(height * (640 / float(width))) if width > 0 else 480
+     video_writer = cv.VideoWriter(output_path, fourcc, fps, (640, output_height))
+
+     frame_count = 0
+
+     try:
+         while True:
+             ret, frame = video_stream.read()
+             if not ret:
+                 break
+
+             frame_count += 1
+
+             # Process frame
+             processed_frame = process_frame(frame)
+
+             # Write frame to output video
+             video_writer.write(processed_frame)
+
+             # Call progress callback if provided
+             if progress_callback:
+                 progress_callback(frame_count, total_frames)
+
+         # Get final stats (read through the module to see the updated values)
+         stats = {
+             'total_frames': frame_count,
+             'drowsy_events': drowsiness_detection.DROWSY_COUNTER,
+             'yawn_events': drowsiness_detection.YAWN_COUNTER,
+             'head_down_events': drowsiness_detection.HEAD_DOWN_COUNTER
+         }
+
+         return stats
+
+     finally:
+         video_stream.release()
+         video_writer.release()
+
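+ # Example wiring (illustrative paths; assumes a local "input.mp4"): print progress
+ # roughly once per second for a 30 FPS source.
+ # def print_progress(done, total):
+ #     if done % 30 == 0:
+ #         print(f"Processed {done}/{total} frames")
+ # stats = process_video_with_progress("input.mp4", "output.mp4", print_progress)
+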
+ def main():
+     parser = argparse.ArgumentParser(description='Video Processing Utility for Drowsiness Detection')
+     parser.add_argument('--input', '-i', required=True, help='Input video file path')
+     parser.add_argument('--output', '-o', help='Output video file path (optional)')
+     parser.add_argument('--report', '-r', action='store_true', help='Generate processing report')
+     parser.add_argument('--info', action='store_true', help='Show video information only')
+
+     args = parser.parse_args()
+
+     if not os.path.exists(args.input):
+         print(f"Error: Input file {args.input} does not exist")
+         return
+
+     # Show video info
+     if args.info:
+         info = get_video_info(args.input)
+         if info:
+             print(f"Video Information for: {args.input}")
+             print(f"Resolution: {info['width']}x{info['height']}")
+             print(f"FPS: {info['fps']:.2f}")
+             print(f"Duration: {info['duration']:.2f} seconds")
+             print(f"Total Frames: {info['total_frames']}")
+             print(f"File Size: {info['file_size'] / (1024*1024):.2f} MB")
+         else:
+             print("Error: Could not read video file")
+         return
+
+     # Generate output path if not provided
+     # NOTE: the committed file is truncated after "base_name"; the completion
+     # below is an assumed sketch of the remaining lines
+     if not args.output:
+         base_name = os.path.splitext(args.input)[0]
+         args.output = f"{base_name}_processed.mp4"  # assumed default naming
+
+     stats = process_video_with_progress(args.input, args.output)
+     print(f"Processing complete. Stats: {stats}")
+
+     if args.report:
+         report_path = create_processing_report(args.input, args.output, stats)
+         print(f"Report written to {report_path}")
+
+ if __name__ == "__main__":
+     main()
yawning-detected.mp3 ADDED
Binary file (64.3 kB)