simran0608 committed on
Commit 6736f11 · verified · 1 Parent(s): 7317aac

Update streamlit_app.py

Files changed (1)
  1. streamlit_app.py +262 -98
streamlit_app.py CHANGED
@@ -21,7 +21,11 @@ import cv2 as cv
  # --- NEW: Import your refactored video processing logic ---
  from video_processor import process_video_with_progress

- model_path="best.pt"
+ # --- FIXED: Model path handling ---
+ model_path = "best.pt"
+ if not os.path.exists(model_path):
+     st.error(f"Model file '{model_path}' not found. Please ensure it's included in your deployment.")
+     st.stop()

  # --- Page Configuration ---
  st.set_page_config(
@@ -35,12 +39,22 @@ st.set_page_config(
  st.sidebar.title("🚗 Driver Distraction System")
  st.sidebar.write("Choose an option below:")

+ # --- FIXED: Disable webcam feature for cloud deployment ---
+ if os.getenv("SPACE_ID"):  # Running on Hugging Face Spaces
+     available_features = [
+         "Distraction System",
+         "Video Drowsiness Detection"
+     ]
+     st.sidebar.info("💡 Note: Real-time webcam detection is not available in cloud deployment.")
+ else:
+     available_features = [
+         "Distraction System",
+         "Video Drowsiness Detection",
+         "Real-time Drowsiness Detection"
+     ]
+
  # --- Sidebar navigation ---
- page = st.sidebar.radio("Select Feature", [
-     "Distraction System",
-     "Video Drowsiness Detection",
-     "Real-time Drowsiness Detection"
- ])
+ page = st.sidebar.radio("Select Feature", available_features)

  # --- Class Labels (for YOLO model) ---
  st.sidebar.subheader("Class Names")
@@ -60,46 +74,180 @@ if page == "Distraction System":
      if file_type == "Image":
          uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
          if uploaded_file is not None:
-             image = Image.open(uploaded_file).convert('RGB')
-             image_np = np.array(image)
-             col1, col2 = st.columns([1, 1])
-             with col1:
-                 st.subheader("Uploaded Image")
-                 st.image(image, caption="Original Image", use_container_width=True)
-             with col2:
-                 st.subheader("Detection Results")
-                 model = YOLO(model_path)
-                 start_time = time.time()
-                 results = model(image_np)
-                 end_time = time.time()
-                 prediction_time = end_time - start_time
-                 result = results[0]
-                 if len(result.boxes) > 0:
-                     boxes = result.boxes
-                     confidences = boxes.conf.cpu().numpy()
-                     classes = boxes.cls.cpu().numpy()
-                     class_names_dict = result.names
-                     max_conf_idx = confidences.argmax()
-                     predicted_class = class_names_dict[int(classes[max_conf_idx])]
-                     confidence_score = confidences[max_conf_idx]
-                     st.markdown(f"### Predicted Class: **{predicted_class}**")
-                     st.markdown(f"### Confidence Score: **{confidence_score:.4f}** ({confidence_score*100:.1f}%)")
-                     st.markdown(f"Inference Time: {prediction_time:.2f} seconds")
-                 else:
-                     st.warning("No distractions detected.")
-
- # --- Feature: Real-time Drowsiness Detection ---
+             try:
+                 image = Image.open(uploaded_file).convert('RGB')
+                 image_np = np.array(image)
+                 col1, col2 = st.columns([1, 1])
+                 with col1:
+                     st.subheader("Uploaded Image")
+                     st.image(image, caption="Original Image", use_container_width=True)
+                 with col2:
+                     st.subheader("Detection Results")
+
+                     # Load model with error handling
+                     try:
+                         model = YOLO(model_path)
+                         start_time = time.time()
+                         results = model(image_np)
+                         end_time = time.time()
+                         prediction_time = end_time - start_time
+
+                         result = results[0]
+                         if len(result.boxes) > 0:
+                             boxes = result.boxes
+                             confidences = boxes.conf.cpu().numpy()
+                             classes = boxes.cls.cpu().numpy()
+                             class_names_dict = result.names
+                             max_conf_idx = confidences.argmax()
+                             predicted_class = class_names_dict[int(classes[max_conf_idx])]
+                             confidence_score = confidences[max_conf_idx]
+                             st.markdown(f"### Predicted Class: **{predicted_class}**")
+                             st.markdown(f"### Confidence Score: **{confidence_score:.4f}** ({confidence_score*100:.1f}%)")
+                             st.markdown(f"Inference Time: {prediction_time:.2f} seconds")
+                         else:
+                             st.warning("No distractions detected.")
+                     except Exception as e:
+                         st.error(f"Error loading or running model: {str(e)}")
+                         st.info("Please ensure the model file 'best.pt' is present and valid.")
+             except Exception as e:
+                 st.error(f"Error processing image: {str(e)}")
+
+     elif file_type == "Video":
+         uploaded_file = st.file_uploader("Upload Video", type=["mp4", "avi", "mov", "mkv", "webm"])
+         if uploaded_file is not None:
+             try:
+                 # Create a temporary file to hold the uploaded video
+                 tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
+                 tfile.write(uploaded_file.read())
+                 temp_input_path = tfile.name
+                 temp_output_path = tempfile.mktemp(suffix="_processed.mp4")
+
+                 st.subheader("Original Video Preview")
+                 st.video(uploaded_file)
+
+                 if st.button("Process Video for Distraction Detection"):
+                     progress_bar = st.progress(0, text="Preparing to process video...")
+
+                     try:
+                         model = YOLO(model_path)
+                         cap = cv.VideoCapture(temp_input_path)
+                         total_frames = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
+                         fps = cap.get(cv.CAP_PROP_FPS)
+
+                         # Get video properties
+                         width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+                         height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+
+                         # Setup video writer
+                         fourcc = cv.VideoWriter_fourcc(*'mp4v')
+                         out = cv.VideoWriter(temp_output_path, fourcc, fps, (width, height))
+
+                         frame_count = 0
+                         detections = []
+
+                         while True:
+                             ret, frame = cap.read()
+                             if not ret:
+                                 break
+
+                             frame_count += 1
+
+                             # Process frame with YOLO
+                             results = model(frame)
+                             result = results[0]
+
+                             # Draw detections on frame
+                             annotated_frame = result.plot()
+                             out.write(annotated_frame)
+
+                             # Store detection info
+                             if len(result.boxes) > 0:
+                                 boxes = result.boxes
+                                 for i in range(len(boxes)):
+                                     conf = boxes.conf[i].cpu().numpy()
+                                     cls = int(boxes.cls[i].cpu().numpy())
+                                     class_name = result.names[cls]
+                                     detections.append({
+                                         'frame': frame_count,
+                                         'class': class_name,
+                                         'confidence': conf
+                                     })
+
+                             # Update progress
+                             progress = int((frame_count / total_frames) * 100)
+                             progress_bar.progress(progress, text=f"Processing frame {frame_count}/{total_frames}")
+
+                         cap.release()
+                         out.release()
+
+                         st.success("Video processed successfully!")
+
+                         # Show results
+                         st.subheader("Detection Results")
+                         if detections:
+                             # Count detections by class
+                             class_counts = {}
+                             for det in detections:
+                                 class_name = det['class']
+                                 if class_name not in class_counts:
+                                     class_counts[class_name] = 0
+                                 class_counts[class_name] += 1
+
+                             # Display metrics
+                             cols = st.columns(len(class_counts))
+                             for i, (class_name, count) in enumerate(class_counts.items()):
+                                 cols[i].metric(class_name.title(), count)
+                         else:
+                             st.info("No distractions detected in the video.")
+
+                         # Offer processed video for download
+                         if os.path.exists(temp_output_path):
+                             with open(temp_output_path, "rb") as file:
+                                 video_bytes = file.read()
+                                 st.download_button(
+                                     label="📥 Download Processed Video",
+                                     data=video_bytes,
+                                     file_name=f"distraction_detected_{uploaded_file.name}",
+                                     mime="video/mp4"
+                                 )
+
+                     except Exception as e:
+                         st.error(f"Error processing video: {str(e)}")
+                     finally:
+                         # Cleanup
+                         try:
+                             if os.path.exists(temp_input_path):
+                                 os.unlink(temp_input_path)
+                             if os.path.exists(temp_output_path):
+                                 os.unlink(temp_output_path)
+                         except Exception as e:
+                             st.warning(f"Failed to clean up temporary files: {e}")
+
+             except Exception as e:
+                 st.error(f"Error handling video upload: {str(e)}")
+
+ # --- Feature: Real-time Drowsiness Detection (Only for local) ---
  elif page == "Real-time Drowsiness Detection":
      st.title("🧠 Real-time Drowsiness Detection")
-     st.info("This feature requires a local webcam and will open a new window.")
-     st.warning("This feature is intended for local use and will not function in the cloud deployment.")
-     if st.button("Start Drowsiness Detection"):
-         try:
-             # This call is fine, as your new drowsiness_detection.py is set up to handle it.
-             subprocess.Popen(["python3", "drowsiness_detection.py", "--mode", "webcam"])
-             st.success("Attempted to launch detection window. Please check your desktop.")
-         except Exception as e:
-             st.error(f"Failed to start process: {e}")
+
+     if os.getenv("SPACE_ID"):  # Running on Hugging Face Spaces
+         st.error("⚠️ Real-time webcam detection is not available in cloud deployment.")
+         st.info("This feature requires direct access to your camera and only works in local environments.")
+         st.markdown("""
+         **To use this feature:**
+         1. Download the code to your local machine
+         2. Install the required dependencies
+         3. Run the application locally with `streamlit run streamlit_app.py`
+         """)
+     else:
+         st.info("This feature requires a local webcam and will open a new window.")
+         st.warning("This feature is intended for local use and will not function in cloud deployment.")
+         if st.button("Start Drowsiness Detection"):
+             try:
+                 subprocess.Popen(["python3", "drowsiness_detection.py", "--mode", "webcam"])
+                 st.success("Attempted to launch detection window. Please check your desktop.")
+             except Exception as e:
+                 st.error(f"Failed to start process: {e}")

  # --- Feature: Video Drowsiness Detection ---
  elif page == "Video Drowsiness Detection":
@@ -108,60 +256,76 @@ elif page == "Video Drowsiness Detection":
      uploaded_video = st.file_uploader("Upload Video", type=["mp4", "avi", "mov", "mkv", "webm"])

      if uploaded_video is not None:
-         # Create a temporary file to hold the uploaded video
-         tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
-         tfile.write(uploaded_video.read())
-         temp_input_path = tfile.name
-         temp_output_path = tempfile.mktemp(suffix="_processed.mp4")
-
-         st.subheader("Original Video Preview")
-         st.video(uploaded_video)
-
-         if st.button("Process Video for Drowsiness Detection"):
-             progress_bar = st.progress(0, text="Preparing to process video...")
-
-             # --- NEW: Define a callback function for the progress bar ---
-             def streamlit_progress_callback(current, total):
-                 if total > 0:
-                     percent_complete = int((current / total) * 100)
-                     progress_bar.progress(percent_complete, text=f"Analyzing frame {current}/{total}...")
+         try:
+             # Create a temporary file to hold the uploaded video
+             tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
+             tfile.write(uploaded_video.read())
+             temp_input_path = tfile.name
+             temp_output_path = tempfile.mktemp(suffix="_processed.mp4")

-             try:
-                 with st.spinner("Processing video... This may take a while."):
-                     # --- NEW: Directly call your robust video processing function ---
-                     # No more complex subprocess logic needed!
-                     stats = process_video_with_progress(
-                         input_path=temp_input_path,
-                         output_path=temp_output_path,
-                         progress_callback=streamlit_progress_callback
-                     )
+             st.subheader("Original Video Preview")
+             st.video(uploaded_video)
+
+             if st.button("Process Video for Drowsiness Detection"):
+                 progress_bar = st.progress(0, text="Preparing to process video...")

-                 progress_bar.progress(100, text="Video processing completed!")
-                 st.success("Video processed successfully!")
-
-                 # --- NEW: Display the returned statistics ---
-                 st.subheader("Detection Results")
-                 col1, col2, col3 = st.columns(3)
-                 col1.metric("Drowsy Events", stats.get('drowsy_events', 0))
-                 col2.metric("Yawn Events", stats.get('yawn_events', 0))
-                 col3.metric("Head Down Events", stats.get('head_down_events', 0))
-
-                 # Offer the processed video for download
-                 if os.path.exists(temp_output_path):
-                     with open(temp_output_path, "rb") as file:
-                         video_bytes = file.read()
-                         st.download_button(
-                             label="📥 Download Processed Video",
-                             data=video_bytes,
-                             file_name=f"drowsiness_detected_{uploaded_video.name}",
-                             mime="video/mp4"
-                         )
-             except Exception as e:
-                 st.error(f"An error occurred during video processing: {e}")
-             finally:
-                 # Cleanup temporary files
+                 # --- Define a callback function for the progress bar ---
+                 def streamlit_progress_callback(current, total):
+                     if total > 0:
+                         percent_complete = int((current / total) * 100)
+                         progress_bar.progress(percent_complete, text=f"Analyzing frame {current}/{total}...")
+
                  try:
-                     if os.path.exists(temp_input_path): os.unlink(temp_input_path)
-                     if os.path.exists(temp_output_path): os.unlink(temp_output_path)
-                 except Exception as e_clean:
-                     st.warning(f"Failed to clean up temporary files: {e_clean}")
+                     with st.spinner("Processing video... This may take a while."):
+                         # Call your robust video processing function
+                         stats = process_video_with_progress(
+                             input_path=temp_input_path,
+                             output_path=temp_output_path,
+                             progress_callback=streamlit_progress_callback
+                         )
+
+                     progress_bar.progress(100, text="Video processing completed!")
+                     st.success("Video processed successfully!")
+
+                     # Display the returned statistics
+                     st.subheader("Detection Results")
+                     col1, col2, col3 = st.columns(3)
+                     col1.metric("Drowsy Events", stats.get('drowsy_events', 0))
+                     col2.metric("Yawn Events", stats.get('yawn_events', 0))
+                     col3.metric("Head Down Events", stats.get('head_down_events', 0))
+
+                     # Offer the processed video for download
+                     if os.path.exists(temp_output_path):
+                         with open(temp_output_path, "rb") as file:
+                             video_bytes = file.read()
+                             st.download_button(
+                                 label="📥 Download Processed Video",
+                                 data=video_bytes,
+                                 file_name=f"drowsiness_detected_{uploaded_video.name}",
+                                 mime="video/mp4"
+                             )
+                 except Exception as e:
+                     st.error(f"An error occurred during video processing: {e}")
+                     st.info("Please ensure all required model files are present and the video format is supported.")
+                 finally:
+                     # Cleanup temporary files
+                     try:
+                         if os.path.exists(temp_input_path):
+                             os.unlink(temp_input_path)
+                         if os.path.exists(temp_output_path):
+                             os.unlink(temp_output_path)
+                     except Exception as e_clean:
+                         st.warning(f"Failed to clean up temporary files: {e_clean}")
+
+         except Exception as e:
+             st.error(f"Error handling video upload: {str(e)}")
+
+ # --- Footer ---
+ st.sidebar.markdown("---")
+ st.sidebar.markdown("### 📝 Notes")
+ st.sidebar.markdown("""
+ - **Image Detection**: Upload JPG, PNG images
+ - **Video Detection**: Upload MP4, AVI, MOV videos
+ - **Cloud Limitations**: Webcam access not available in cloud deployment
+ - **Model**: Uses YOLO for distraction detection
+ """)
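Note on the new video flow: streamlit_app.py relies on a small contract with video_processor.process_video_with_progress. It passes input_path, output_path, and a progress_callback; it expects the callback to be invoked as progress_callback(current, total) while frames are processed; and it reads 'drowsy_events', 'yawn_events', and 'head_down_events' from the returned dict. The sketch below illustrates only that assumed contract; the real detection logic lives in video_processor.py, which is not part of this commit, so the loop body here is a placeholder.

import cv2 as cv

def process_video_with_progress(input_path, output_path, progress_callback=None):
    # Sketch of the interface streamlit_app.py relies on; actual detection logic omitted.
    cap = cv.VideoCapture(input_path)
    total = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv.CAP_PROP_FPS) or 25.0
    size = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
    out = cv.VideoWriter(output_path, cv.VideoWriter_fourcc(*"mp4v"), fps, size)

    # Keys match what the Streamlit page reads with stats.get(...)
    stats = {"drowsy_events": 0, "yawn_events": 0, "head_down_events": 0}
    current = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        current += 1
        # ... drowsiness / yawn / head-pose analysis would update stats and annotate frame here ...
        out.write(frame)
        if progress_callback is not None:
            progress_callback(current, total)  # drives st.progress in the app

    cap.release()
    out.release()
    return stats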