Spaces: Runtime error
MuhFaridanSutariya committed
Commit · 7e5c89f
Parent(s): 6990784
first init
Browse files:
- app.py +68 -0
- requirements.txt +5 -0
- utils.py +34 -0
- video_processor.py +13 -0
app.py
ADDED
@@ -0,0 +1,68 @@
+import streamlit as st
+from ultralytics import YOLO
+from supervision.draw.color import ColorPalette
+from supervision.tracker.byte_tracker.core import ByteTrack
+from supervision.detection.annotate import BoxAnnotator
+from streamlit_webrtc import webrtc_streamer, RTCConfiguration
+from video_processor import VideoProcessor
+from utils import process_image, process_video_realtime
+import tempfile
+import numpy as np
+import cv2
+
+# Public STUN server so WebRTC can connect across NAT.
+RTC_CONFIGURATION = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
+
+def main():
+    st.title('Vehicle Detection and Counting')
+
+    # Load YOLOv8 nano and fuse layers for faster inference.
+    model = YOLO("yolov8n.pt")
+    model.fuse()
+    label_map = model.model.names
+    byte_tracker = ByteTrack()
+    box_annotator = BoxAnnotator(color=ColorPalette.default(), thickness=4, text_thickness=4, text_scale=2)
+
+    source_option = st.selectbox("Select Source Data", ("Webcam", "Image file", "Video file"))
+
+    if source_option == "Webcam":
+        ctx = webrtc_streamer(
+            key="example",
+            video_processor_factory=lambda: VideoProcessor(model, label_map, byte_tracker, box_annotator),
+            rtc_configuration=RTC_CONFIGURATION,
+            media_stream_constraints={"video": True, "audio": False}
+        )
+        if st.button('Take Screenshot and Process'):
+            if ctx.video_processor and ctx.video_processor.frame is not None:
+                image = ctx.video_processor.frame
+                processed_image, object_counts = process_image(image, model, label_map, byte_tracker, box_annotator)
+                # Frames are BGR (OpenCV channel order), so tell st.image explicitly.
+                st.image(processed_image, caption="Processed Image", channels="BGR", use_column_width=True)
+                st.write("Results:", object_counts)
+            else:
+                st.warning("No frame available yet.")
+    elif source_option == "Image file":
+        uploaded_image = st.file_uploader("Upload an image file", type=["jpg", "jpeg", "png"])
+        if uploaded_image is not None:
+            # Decode the uploaded bytes into a BGR OpenCV image.
+            image = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
+            st.image(image, caption="Uploaded Image", channels="BGR", use_column_width=True)
+            if st.button('Process Image'):
+                processed_image, object_counts = process_image(image, model, label_map, byte_tracker, box_annotator)
+                st.image(processed_image, caption="Processed Image", channels="BGR", use_column_width=True)
+                st.write("Results:", object_counts)
+    elif source_option == "Video file":
+        uploaded_video = st.file_uploader("Upload a video file", type=["mp4"])
+        demo_video = "vehicle-counting.mp4"
+
+        if uploaded_video is not None:
+            # Persist the upload to a temp file so it can be read by path.
+            temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
+            with open(temp_video_path, 'wb') as f:
+                f.write(uploaded_video.read())
+            st.video(temp_video_path)
+            if st.button('Process Uploaded Video'):
+                process_video_realtime(temp_video_path, model, label_map, byte_tracker, box_annotator)
+        else:
+            st.header("Demo Video Preview")
+            st.video(demo_video)
+            if st.button('Process Demo Video'):
+                process_video_realtime(demo_video, model, label_map, byte_tracker, box_annotator)
+
+if __name__ == "__main__":
+    main()
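Not part of the commit, but a useful companion: a minimal headless smoke test (hypothetical smoke_test.py) that builds the same model/tracker/annotator stack as app.py and runs process_image from utils.py (below) on a blank frame, to confirm the pinned packages wire together before launching Streamlit.

# smoke_test.py -- hypothetical, not included in this commit.
import numpy as np
from ultralytics import YOLO
from supervision.draw.color import ColorPalette
from supervision.tracker.byte_tracker.core import ByteTrack
from supervision.detection.annotate import BoxAnnotator
from utils import process_image

model = YOLO("yolov8n.pt")
model.fuse()
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank BGR frame
annotated, counts = process_image(
    frame, model, model.model.names, ByteTrack(),
    BoxAnnotator(color=ColorPalette.default(), thickness=4, text_thickness=4, text_scale=2),
)
print(counts)  # expected: {} since there is nothing to detect in a blank frame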
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+ultralytics==8.0.157
+supervision==0.13.0
+tqdm
+streamlit==1.36.0
+streamlit_webrtc==0.47.7
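Given the Space's "Runtime error" status, a quick version check (a hypothetical snippet, not in the commit) can confirm these pins are what actually resolved in the running environment:

# Hypothetical sanity check that the pinned versions are the ones imported.
import ultralytics, supervision, streamlit
print(ultralytics.__version__)  # expect 8.0.157
print(supervision.__version__)  # expect 0.13.0
print(streamlit.__version__)    # expect 1.36.0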
utils.py
ADDED
@@ -0,0 +1,34 @@
+from supervision.detection.core import Detections
+from supervision.utils.video import VideoInfo, get_video_frames_generator
+import streamlit as st
+
+def process_image(image, model, label_map, byte_tracker, box_annotator):
+    # Detect, track, and draw labelled boxes on a single BGR image.
+    results = model(image)[0]
+    detections = Detections.from_ultralytics(results)
+    detections = byte_tracker.update_with_detections(detections=detections)
+    labels = [f"{label_map[class_id]} {confidence:0.2f} -track_id:{tracker_id}" for _, _, confidence, class_id, tracker_id in detections]
+    annotated_image = box_annotator.annotate(scene=image, detections=detections, labels=labels)
+
+    # Tally detections per class name.
+    object_counts = {}
+    for detection in detections:
+        class_id = detection[3]
+        class_name = label_map[class_id]
+        if class_name in object_counts:
+            object_counts[class_name] += 1
+        else:
+            object_counts[class_name] = 1
+
+    return annotated_image, object_counts
+
+def process_video_realtime(input_video_path, model, label_map, byte_tracker, box_annotator):
+    video_info = VideoInfo.from_video_path(input_video_path)
+    generator = get_video_frames_generator(input_video_path)
+    stframe = st.empty()  # single placeholder, overwritten with each annotated frame
+
+    for frame in generator:
+        results = model(frame)[0]
+        detections = Detections.from_ultralytics(results)
+        detections = byte_tracker.update_with_detections(detections=detections)
+        labels = [f"{label_map[class_id]} {confidence:0.2f} -track_id:{tracker_id}" for _, _, confidence, class_id, tracker_id in detections]
+        annotated_frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
+        stframe.image(annotated_frame, channels="BGR")
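The five-field unpacking in the labels comprehension and the detection[3] index both depend on how supervision 0.13 iterates a Detections object: each item is a (xyxy, mask, confidence, class_id, tracker_id) tuple. A hand-built example with hypothetical values makes that contract explicit:

import numpy as np
from supervision.detection.core import Detections

# Hypothetical single detection: one box, class 2 ("car" in COCO), track id 7.
dets = Detections(
    xyxy=np.array([[10.0, 10.0, 50.0, 50.0]]),
    confidence=np.array([0.9]),
    class_id=np.array([2]),
    tracker_id=np.array([7]),
)
for xyxy, mask, confidence, class_id, tracker_id in dets:
    print(class_id, confidence, tracker_id)  # -> 2 0.9 7 (mask is None)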
video_processor.py
ADDED
@@ -0,0 +1,13 @@
+import av
+from streamlit_webrtc import VideoProcessorBase
+
+class VideoProcessor(VideoProcessorBase):
+    def __init__(self, model, label_map, byte_tracker, box_annotator):
+        self.model = model
+        self.label_map = label_map
+        self.byte_tracker = byte_tracker
+        self.box_annotator = box_annotator
+        self.frame = None  # latest BGR frame, read by the screenshot button in app.py
+
+    def recv(self, frame):
+        # Cache the latest frame for the screenshot flow; recv must return an
+        # av.VideoFrame (returning the raw ndarray breaks the stream).
+        self.frame = frame.to_ndarray(format="bgr24")
+        return av.VideoFrame.from_ndarray(self.frame, format="bgr24")
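recv can also be exercised outside the browser with a synthetic frame (av is pulled in as a streamlit-webrtc dependency). A hypothetical check, not part of the commit:

import av
import numpy as np
from video_processor import VideoProcessor

# The model/tracker arguments are unused by recv, so None placeholders suffice here.
vp = VideoProcessor(model=None, label_map=None, byte_tracker=None, box_annotator=None)
synthetic = av.VideoFrame.from_ndarray(np.zeros((480, 640, 3), dtype=np.uint8), format="bgr24")
out = vp.recv(synthetic)
print(type(out).__name__, vp.frame.shape)  # -> VideoFrame (480, 640, 3)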