hifebole committed on
Commit
d6890dc
·
1 Parent(s): c948d1f

first commit

Browse files
Files changed (3) hide show
  1. app.py +167 -0
  2. requirements.txt +9 -0
  3. turn.py +33 -0
app.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import streamlit as st
3
+ import numpy as np
4
+ import tempfile
5
+ import os
6
+ from ultralytics import YOLO
7
+ from streamlit_webrtc import (webrtc_streamer, VideoProcessorBase, WebRtcMode, RTCConfiguration)
8
+ import av
9
+ from turn import get_ice_servers
10
+
11
+ model = YOLO('yolov8n.pt')  # pretrained YOLOv8-nano weights; downloaded on first run
+
+ # Global variable to store the latest frame with bounding boxes,
+ # reused for frames that skip inference.
+ cached_frame = None
+ frame_skip = 5 # Process every 5th frame (countdown; reset to 10 inside recv)
16
def recv(frame: av.VideoFrame) -> av.VideoFrame:
    """Per-frame WebRTC callback: run YOLOv8 tracking on every Nth frame.

    Skipped frames reuse the most recently annotated frame so the stream
    stays smooth while keeping inference load low.

    Parameters
    ----------
    frame : av.VideoFrame
        The incoming camera frame from streamlit-webrtc.

    Returns
    -------
    av.VideoFrame
        The (possibly cached) annotated frame in RGB format.
    """
    # BUG FIX: frame_skip and cached_frame are assigned below, which made
    # them function-locals and raised UnboundLocalError on the first read.
    # Declare them global so the skip counter and frame cache persist
    # across callback invocations.
    global frame_skip, cached_frame

    # Convert frame to OpenCV format (BGR)
    frame_bgr = frame.to_ndarray(format="bgr24")

    # Aggressively downscale to reduce YOLO inference time.
    frame_resized = cv2.resize(frame_bgr, (160, 120))  # Instead of 640x480

    # Process every nth frame
    if frame_skip == 0:
        # Reset the frame skip counter
        frame_skip = 10

        # Detect and track objects using YOLOv8
        results = model.track(frame_resized, persist=True)

        # Plot results (draws boxes / track IDs onto a copy of the frame)
        frame_annotated = results[0].plot()

        # Cache the annotated frame for the skipped frames that follow
        cached_frame = frame_annotated
    else:
        # Use the cached frame for skipped frames; fall back to the raw
        # resized frame until the first inference has produced one.
        frame_annotated = cached_frame if cached_frame is not None else frame_resized
        frame_skip -= 1

    # Convert frame back to RGB format for the outgoing av.VideoFrame
    frame_rgb = cv2.cvtColor(frame_annotated, cv2.COLOR_BGR2RGB)

    return av.VideoFrame.from_ndarray(frame_rgb, format="rgb24")
72
+
73
# Streamlit web app
def main():
    """Streamlit entry point: live WebRTC tracking or uploaded-video tracking."""
    # Page chrome
    st.set_page_config(page_title="Object Tracking with Streamlit")
    st.title("Object Tracking")

    # Let the user pick the input source
    choice = st.radio("Choose an option:", ("Live Stream", "Upload Video"))

    if choice == "Live Stream":
        # WebRTC stream with the per-frame YOLO callback; ICE servers
        # (STUN/TURN) come from turn.get_ice_servers().
        webrtc_streamer(
            key="live-stream",
            video_frame_callback=recv,
            rtc_configuration={"iceServers": get_ice_servers()},
            media_stream_constraints={"video": True, "audio": False},
            async_processing=True,
        )
    elif choice == "Upload Video":
        # Widgets are created in the same order as before so the page
        # layout is unchanged.
        video_upload = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov"])
        start_clicked = st.button("Start Tracking")
        display_slot = st.empty()
        stop_clicked = st.button("Stop")

        # Only start once a file is present and the user asked for it
        if start_clicked and video_upload is not None:
            track_uploaded_video(video_upload, stop_clicked, display_slot)

        # Release resources
        if video_upload:
            video_upload.close()
124
+
125
# Function to perform object tracking on uploaded video
def track_uploaded_video(video_file, stop_button, frame_placeholder):
    """Run YOLOv8 tracking over an uploaded video and display annotated frames.

    Parameters
    ----------
    video_file : file-like
        Uploaded object exposing .read() (Streamlit UploadedFile).
    stop_button : bool
        Snapshot of the Stop button state taken by the caller.
    frame_placeholder : st.empty() placeholder
        Target that receives each annotated frame.

    The upload is spooled to a temporary file because cv2.VideoCapture
    requires a filesystem path.  BUG FIX: the capture handle and the
    delete=False temp file are now released in ``finally`` blocks, so they
    no longer leak when decoding or inference raises mid-loop.
    """
    # Create a temporary file to save the uploaded video
    temp_video = tempfile.NamedTemporaryFile(delete=False)
    try:
        temp_video.write(video_file.read())
    finally:
        temp_video.close()

    # OpenCV's VideoCapture for reading the spooled video file
    cap = cv2.VideoCapture(temp_video.name)
    try:
        frame_count = 0
        while cap.isOpened() and not stop_button:
            ret, frame = cap.read()

            if not ret:
                st.write("The video capture has ended.")
                break

            # Only every 5th frame goes through YOLO, to keep the UI responsive
            if frame_count % 5 == 0:
                # Resize frame to reduce processing time
                frame_resized = cv2.resize(frame, (640, 480))

                # Detect and track objects using YOLOv8
                results = model.track(frame_resized, persist=True)

                # Plot results
                frame_ = results[0].plot()

                # Display frame with bounding boxes
                frame_placeholder.image(frame_, channels="BGR")

            frame_count += 1
    finally:
        # Always release the decoder and remove the temporary file,
        # even when an exception escapes the loop above.
        cap.release()
        os.remove(temp_video.name)
164
+
165
+ # Run the app only when executed as a script (not when imported).
+ if __name__ == "__main__":
+ main()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ numpy==1.26.4
2
+ opencv_python==4.9.0.80
3
+ Pillow==10.2.0
4
+ streamlit==1.32.2
5
+ ultralytics==8.1.29
6
+ lapx>=0.5.2
7
+ streamlit-webrtc==0.47.6
8
+ twilio~=8.1.0
9
+ imutils==0.5.3
turn.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+
4
+ import streamlit as st
5
+ from twilio.rest import Client
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+
10
@st.cache_data  # type: ignore
def get_ice_servers():
    """Return ICE servers for WebRTC, preferring Twilio's TURN service.

    Streamlit Community Cloud's infrastructure requires a TURN server for
    a WebRTC connection to be established (see
    https://github.com/whitphx/streamlit-webrtc/issues/1213).  The Open
    Relay Project was considered but reported as unstable
    (https://github.com/aiortc/aiortc/issues/832#issuecomment-1482420656).
    When the Twilio credentials are not present in the environment, fall
    back to Google's free public STUN server.

    Ref: https://www.twilio.com/docs/stun-turn/api
    """
    sid = os.environ.get("TWILIO_ACCOUNT_SID")
    auth = os.environ.get("TWILIO_AUTH_TOKEN")

    if sid is None or auth is None:
        logger.warning(
            "Twilio credentials are not set. Fallback to a free STUN server from Google."  # noqa: E501
        )
        return [{"urls": ["stun:stun.l.google.com:19302"]}]

    twilio_client = Client(sid, auth)
    return twilio_client.tokens.create().ice_servers