ju0im6bt6 committed
Commit 1a5dd7c · verified · 1 Parent(s): b44c957

Update app.py

Files changed (1)
  1. app.py +43 -56
app.py CHANGED
@@ -1,59 +1,46 @@
+from huggingface_hub import hf_hub_download
+from ultralytics import YOLOv10
 import gradio as gr
-import numpy as np
-from fastrtc import WebRTC
-import time
-
-# WebRTC configuration with public STUN servers
-rtc_configuration = {
-    "iceServers": [
-        {"urls": "stun:stun.l.google.com:19302"},
-        {"urls": "stun:stun1.l.google.com:19302"}
-    ]
-}
-
-# Video processing function (example: flip video vertically)
-def flip_vertically(image):
-    return np.flip(image, axis=0)
-
-# Server-side function to process video stream
-def process_stream(webrtc_input):
-    while True:
-        # Get the next frame from the WebRTC input
-        frame = next(webrtc_input, None)
-        if frame is not None:
-            # Process the frame (e.g., flip vertically)
-            processed_frame = flip_vertically(frame)
-            yield processed_frame
-        # Control frame rate (e.g., process every 0.1 seconds)
-        time.sleep(0.1)
-
-# Gradio app setup
-with gr.Blocks() as demo:
-    gr.HTML("<h1 style='text-align: center'>Real-Time Webcam Stream with FastRTC</h1>")
-
-    # WebRTC component for video streaming
-    webrtc_input = WebRTC(
-        label="Webcam Stream",
-        mode="send",
-        modality="video",
-        rtc_configuration=rtc_configuration,
-    )
-
-    # Output component to display processed video
-    webrtc_output = WebRTC(
-        label="Processed Stream",
-        mode="receive",
-        modality="video",
-        rtc_configuration=rtc_configuration,
-    )
-
-    # Connect input to output through the processing function
-    demo.load(
-        fn=process_stream,
-        inputs=[webrtc_input],
-        outputs=[webrtc_output],
-        queue=True,
+from gradio_webrtc import WebRTC
+
+model_file = hf_hub_download(
+    repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
+)
+
+model = YOLOv10(model_file)
+
+def detection(image, conf_threshold=0.3):
+    image = cv2.resize(image, (model.input_width, model.input_height))
+    new_image = model.detect_objects(image, conf_threshold)
+    return new_image
+
+
+
+css = """.my-group {max-width: 600px !important; max-height: 600px !important;}
+.my-column {display: flex !important; justify-content: center !important; align-items: center !important;}"""
+
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        """
+        <h1 style='text-align: center'>
+        YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
+        </h1>
+        """
     )
+    with gr.Column(elem_classes=["my-column"]):
+        with gr.Group(elem_classes=["my-group"]):
+            image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
+            conf_threshold = gr.Slider(
+                label="Confidence Threshold",
+                minimum=0.0,
+                maximum=1.0,
+                step=0.05,
+                value=0.30,
+            )
+
+        image.stream(
+            fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
+        )
 
-# Launch the app
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
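
As committed, the new app.py leaves two names unresolved: `cv2` (used inside `detection`) is never imported, and `rtc_configuration` (passed to the `WebRTC` component) is no longer defined once the old STUN-server block is deleted. The calls `model.input_width`, `model.input_height`, and `model.detect_objects(...)` also do not appear to be part of the stock `ultralytics` YOLOv10 interface, so they presumably come from a custom ONNX inference wrapper that is not included in this commit. A minimal sketch of the missing pieces, assuming the STUN configuration from the removed version is still wanted and that a local module (hypothetical name `inference.py`) provides the wrapper class these calls expect:

    import cv2  # used by detection() but missing from the committed file
    import gradio as gr
    from huggingface_hub import hf_hub_download
    from gradio_webrtc import WebRTC

    # Assumption: a local wrapper (not in this commit) that loads the ONNX model
    # and exposes input_width, input_height, and detect_objects(), matching the
    # calls in the committed detection() function. The stock
    # `from ultralytics import YOLOv10` class does not provide these attributes.
    from inference import YOLOv10

    # Carried over from the removed FastRTC version; without it,
    # rtc_configuration is an undefined name when the WebRTC component is built.
    rtc_configuration = {
        "iceServers": [
            {"urls": "stun:stun.l.google.com:19302"},
            {"urls": "stun:stun1.l.google.com:19302"},
        ]
    }

    model_file = hf_hub_download(
        repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
    )
    model = YOLOv10(model_file)

    def detection(image, conf_threshold=0.3):
        # Resize the incoming webcam frame to the model's input size, run
        # detection, and return the annotated frame, as in the committed version.
        image = cv2.resize(image, (model.input_width, model.input_height))
        return model.detect_objects(image, conf_threshold)

The Blocks layout and the `image.stream(...)` wiring can remain exactly as committed; only the imports and the `rtc_configuration` definition above need to be added for the app to start.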