ankanpy committed
Commit
1afea77
1 Parent(s): bdb20e6

updated app.py

Files changed (3):
  1. README.md +1 -1
  2. app.py +45 -76
  3. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 👁
 colorFrom: yellow
 colorTo: yellow
 sdk: gradio
-sdk_version: 4.42.0
+sdk_version: 5.23.3
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED
@@ -2,102 +2,70 @@ import cv2
 import gradio as gr
 import numpy as np
 
-# input_video = './sample/car.mp4'
 
-# video Inference
 def vid_inf(vid_path, contour_thresh):
-    # Create a VideoCapture object
+    contour_thresh = int(contour_thresh)
+
     cap = cv2.VideoCapture(vid_path)
+    if not cap.isOpened():
+        print("Error opening video file")
+        yield None, None
+        return
 
-    # get the video frames' width and height for proper saving of videos
     frame_width = int(cap.get(3))
     frame_height = int(cap.get(4))
     fps = int(cap.get(cv2.CAP_PROP_FPS))
     frame_size = (frame_width, frame_height)
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
     output_video = "output_recorded.mp4"
-
-    # create the `VideoWriter()` object
     out = cv2.VideoWriter(output_video, fourcc, fps, frame_size)
 
-    # Create Background Subtractor MOG2 object
     backSub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=True)
-    # print(dir(backSub))
 
-    # Check if camera opened successfully
-    if not cap.isOpened():
-        print("Error opening video file")
     count = 0
-    # Read until video is completed
     while cap.isOpened():
-        # Capture frame-by-frame
         ret, frame = cap.read()
-        # print(frame.shape)
-        if ret:
-            # Apply background subtraction
-            fg_mask = backSub.apply(frame)
-            # print(fg_mask.shape)
-            # fg_mask = cv2.resize(fg_mask, (640,480))
-            # print(fg_mask.shape)
-            # cv2.imshow('Frame_bg', fg_mask)
-
-            # apply global threshol to remove shadows
-            retval, mask_thresh = cv2.threshold(
-                fg_mask, 200, 255, cv2.THRESH_BINARY)
-            # cv2.imshow('frame_thresh', mask_thresh)
-
-            # set the kernal
-            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
-            # Apply erosion
-            mask_eroded = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)
-            # mask_eroded = cv2.resize(mask_eroded, (640,480))
-            # cv2.imshow('frame_erode', mask_eroded)
-
-            # Find contours
-            contours, hierarchy = cv2.findContours(
-                mask_eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-            # print(contours)
-
-            min_contour_area = contour_thresh  # Define your minimum area threshold
-            large_contours = [
-                cnt for cnt in contours if cv2.contourArea(cnt) > min_contour_area]
-            # frame_ct = cv2.drawContours(frame, large_contours, -1, (0, 255, 0), 2)
-            frame_out = frame.copy()
-            for cnt in large_contours:
-                # print(cnt.shape)
-                x, y, w, h = cv2.boundingRect(cnt)
-                frame_out = cv2.rectangle(frame_out, (x, y), (x+w, y+h), (0, 0, 200), 3)
-            frame_out_final = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
-            vid = out.write(frame_out)
-
-            # Display the resulting frame
-            # resized_frame = cv2.resize(frame_out, (640,480))
-            # cv2.imshow('Frame_final', frame_out)
-
-            # update the count every frame and display every 12th frame
-            if not count % 12:
-                yield frame_out_final, None
-            count += 1
-
-            # Press Q on keyboard to exit
-            if cv2.waitKey(25) & 0xFF == ord('q'):
-                break
-        else:
+        if not ret:
             break
 
-    # When everything done, release the video capture and writer object
+        fg_mask = backSub.apply(frame)
+
+        _, mask_thresh = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
+
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+        mask_cleaned = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)
+
+        contours, _ = cv2.findContours(mask_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+        large_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > contour_thresh]
+
+        frame_out = frame.copy()
+        for cnt in large_contours:
+            x, y, w, h = cv2.boundingRect(cnt)
+            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (0, 0, 200), 3)
+
+        frame_rgb = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
+        out.write(frame_out)
+
+        if count % 12 == 0:
+            yield frame_rgb, None
+        count += 1
+
     cap.release()
     out.release()
-    # Closes all the frames
     cv2.destroyAllWindows()
     yield None, output_video
 
-# vid_inf(input_video)
-
 
-# gradio interface
+# Gradio interface
 input_video = gr.Video(label="Input Video")
-contour_thresh = gr.Slider(0, 10000, value=4, label="Contour Threshold", info="Adjust the Countour Threshold according to the object size that you want to detect.")
+contour_thresh = gr.Slider(
+    0,
+    10000,
+    value=1000,
+    label="Contour Threshold",
+    info="Set the minimum size of moving objects to detect (in pixels).",
+)
 output_frames = gr.Image(label="Output Frames")
 output_video_file = gr.Video(label="Output video")
 
@@ -105,10 +73,11 @@ app = gr.Interface(
     fn=vid_inf,
     inputs=[input_video, contour_thresh],
     outputs=[output_frames, output_video_file],
-    title=f"Motion Detection using OpenCV",
-    description=f'A gradio app for dynamic video analysis tool that leverages advanced background subtraction and contour detection techniques to identify and track moving objects in real-time.',
-    allow_flagging="never",
-    examples=[["./sample/car.mp4", "1000"], ["./sample/motion_test.mp4", "5000"], ["./sample/home.mp4", "4500"]],
+    title="Motion Detection using OpenCV",
+    description="A Gradio app that uses background subtraction and contour detection to highlight moving objects in a video.",
+    flagging_mode="never",
+    examples=[["./sample/car.mp4", 1000], ["./sample/motion_test.mp4", 5000], ["./sample/home.mp4", 4500]],
     cache_examples=False,
 )
-app.queue().launch()
+
+app.queue().launch()
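
Note on the main behavioural change in this commit: the rewritten vid_inf is a generator that yields (preview_frame, None) for every 12th processed frame and finishes with (None, output_video), which is what lets gr.Interface stream intermediate frames into the Image output and then fill the Video output with the recording. A rough sketch of exercising that contract directly, assuming vid_inf from the new app.py is in scope and ./sample/car.mp4 exists locally (this loop is not part of the commit):

# Hypothetical smoke check for vid_inf's streaming contract (not in the commit).
# Intermediate yields: (RGB numpy frame, None); final yield: (None, path to output_recorded.mp4).
for preview, video_path in vid_inf("./sample/car.mp4", contour_thresh=1000):
    if preview is not None:
        print("streamed preview frame with shape", preview.shape)
    else:
        print("processing finished, recorded video at", video_path)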
requirements.txt CHANGED
@@ -1,2 +1,2 @@
 opencv-python==4.10.0.84
-gradio==4.42.0
+gradio==5.23.3
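
Since the gradio pin here has to stay in sync with the sdk_version bump in README.md, a quick check that the local environment matches the new pins can save a failed Space build; a minimal sketch, not part of the commit:

# Hypothetical environment check: confirm installed versions match requirements.txt.
import cv2
import gradio

print("opencv-python:", cv2.__version__)  # expected: 4.10.0.84
print("gradio:", gradio.__version__)      # expected: 5.23.3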