Update app.py
app.py CHANGED
@@ -3,65 +3,81 @@ import cv2
 import torch
 import numpy as np
 import time
+from threading import Thread
+import queue
 
 # Load the YOLO model on the CPU
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
 
+# Global variables for camera control
+camera_queue = queue.Queue(maxsize=1)
+stop_camera = False
+
 def move_robot(command):
     """
     Simulate sending a command to the robot to move.
     Replace this with actual communication code to send commands to the robot.
     """
-
-
-
-
-
-
-
-
-    elif command == "stop":
-        print("Robot stopped")
-
+    commands = {
+        "forward": "Robot moving forward",
+        "backward": "Robot moving backward",
+        "left": "Robot turning left",
+        "right": "Robot turning right",
+        "stop": "Robot stopped"
+    }
+    print(commands.get(command, "Invalid command"))
     return f"Command sent: {command}"
 
-def
-    """
-
-
-
-
-
-
-
-
-
-        # Read a frame
+def camera_loop():
+    """Background thread function to continuously capture frames"""
+    global stop_camera
+    cap = cv2.VideoCapture(0)
+
+    if not cap.isOpened():
+        print("Error: Could not open camera")
+        return
+
+    while not stop_camera:
         ret, frame = cap.read()
-
         if ret:
             # Run YOLO detection
             results = model(frame)
             processed_frame = results.render()[0]
             # Convert BGR to RGB for display
             processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
-        else:
-            processed_frame = np.zeros((480, 640, 3), dtype=np.uint8)
 
-
-
-
-
-
-
-
-
+            # Update the queue with the latest frame
+            try:
+                if camera_queue.full():
+                    camera_queue.get_nowait()  # Remove old frame
+                camera_queue.put_nowait(processed_frame)
+            except queue.Full:
+                pass
+        time.sleep(0.1)  # Prevent excessive CPU usage
+
+    cap.release()
+
+def start_camera():
+    """Start the camera feed"""
+    global stop_camera
+    stop_camera = False
+    Thread(target=camera_loop, daemon=True).start()
+    return "Camera started"
 
-def
-    """
-
-
-
+def stop_camera_feed():
+    """Stop the camera feed"""
+    global stop_camera
+    stop_camera = True
+    empty_frame = np.zeros((480, 640, 3), dtype=np.uint8)
+    return "Camera stopped", empty_frame
+
+def get_current_frame():
+    """Get the latest frame from the camera queue"""
+    try:
+        frame = camera_queue.get_nowait()
+        return frame
+    except queue.Empty:
+        return np.zeros((480, 640, 3), dtype=np.uint8)
 
 def robot_interface():
     with gr.Blocks() as interface:
@@ -82,28 +98,30 @@ def robot_interface():
         camera_output = gr.Image(
             label="Robot Camera Feed with Object Detection",
             type="numpy",
-            height=480
+            height=480,
+            streaming=True
         )
 
-        #
-
+        # Start/Stop buttons for camera feed
+        start_camera_btn = gr.Button("Start Camera", variant="primary")
+        stop_camera_btn = gr.Button("Stop Camera", variant="secondary")
 
-        # Set up button click events
+        # Set up button click events for robot movement
         forward_btn.click(fn=move_robot, inputs=[gr.Textbox(value="forward", visible=False)], outputs=status_output)
         backward_btn.click(fn=move_robot, inputs=[gr.Textbox(value="backward", visible=False)], outputs=status_output)
         left_btn.click(fn=move_robot, inputs=[gr.Textbox(value="left", visible=False)], outputs=status_output)
         right_btn.click(fn=move_robot, inputs=[gr.Textbox(value="right", visible=False)], outputs=status_output)
         stop_btn.click(fn=move_robot, inputs=[gr.Textbox(value="stop", visible=False)], outputs=status_output)
 
-        # Set up camera
-
-
-
-
-        )
+        # Set up button click events for camera control
+        start_camera_btn.click(fn=start_camera, inputs=None, outputs=status_output)
+        stop_camera_btn.click(fn=stop_camera_feed, inputs=None, outputs=[status_output, camera_output])
+
+        # Stream the camera feed
+        camera_output.stream(fn=get_current_frame)
 
     return interface
 
 # Launch the Gradio interface
 if __name__ == "__main__":
-    robot_interface().launch(share=True, debug=True)
+    robot_interface().launch(share=True, debug=True)
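Some notes on the new code follow. First, the frame handoff: camera_queue is created with maxsize=1, so it behaves as a single slot that the capture thread overwrites with the newest frame while the UI side reads it without blocking. Below is a minimal self-contained sketch of that pattern, with a synthetic frame source standing in for the OpenCV/YOLO capture; the names (frame_queue, producer, latest_frame) are illustrative, not from the commit.

import queue
import time
from threading import Thread

import numpy as np

# Single-slot queue: holds at most the latest frame.
frame_queue = queue.Queue(maxsize=1)
stop_flag = False

def producer():
    """Capture stand-in: publishes a fresh synthetic frame ~10x per second."""
    i = 0
    while not stop_flag:
        frame = np.full((480, 640, 3), i % 256, dtype=np.uint8)  # fake frame
        try:
            if frame_queue.full():
                frame_queue.get_nowait()  # drop the stale frame
            frame_queue.put_nowait(frame)
        except (queue.Full, queue.Empty):
            pass  # benign race with the reader; retry on the next tick
        i += 1
        time.sleep(0.1)

def latest_frame():
    """Reader side: never blocks; falls back to a black frame."""
    try:
        return frame_queue.get_nowait()
    except queue.Empty:
        return np.zeros((480, 640, 3), dtype=np.uint8)

if __name__ == "__main__":
    Thread(target=producer, daemon=True).start()
    time.sleep(0.3)
    print(latest_frame().mean())  # non-zero once the producer has published

One difference from the committed loop is deliberate: the sketch also catches queue.Empty on the producer side. full() followed by get_nowait() is not atomic, so if the reader drains the slot between the two calls, the committed camera_loop would die on an uncaught queue.Empty.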
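Second, the button wiring: each movement button routes its command through an invisible gr.Textbox, which works but adds five hidden components to the layout and a round trip through their values. A leaner equivalent (a sketch, not the committed code) binds the command in a closure instead:

import gradio as gr

def move_robot(command):
    return f"Command sent: {command}"

with gr.Blocks() as interface:
    status_output = gr.Textbox(label="Status")
    for name in ("forward", "backward", "left", "right", "stop"):
        btn = gr.Button(name.capitalize())
        # Default-argument closure pins 'name' per button; no hidden inputs.
        btn.click(fn=lambda cmd=name: move_robot(cmd),
                  inputs=None, outputs=status_output)

if __name__ == "__main__":
    interface.launch()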
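Finally, the streaming hookup. camera_output.stream(fn=get_current_frame) attaches a .stream() listener to an output image; in the Gradio releases I am aware of, .stream() fires for streaming input components (e.g., a webcam-sourced gr.Image), so this line may never poll the queue. A hedged alternative is a periodic load event; this sketch assumes Gradio 3.x/4.x, where event listeners accept every= (Gradio 5 replaced it with gr.Timer):

import gradio as gr
import numpy as np

def get_current_frame():
    # Stand-in for the queue-backed function in app.py above.
    return np.zeros((480, 640, 3), dtype=np.uint8)

with gr.Blocks() as interface:
    camera_output = gr.Image(label="Robot Camera Feed with Object Detection",
                             type="numpy", height=480)
    # Re-run get_current_frame every 0.1 s while a client is connected.
    interface.load(fn=get_current_frame, inputs=None,
                   outputs=camera_output, every=0.1)

if __name__ == "__main__":
    interface.launch()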