# Robot teleoperation dashboard: Gradio UI with movement controls and a
# live webcam feed annotated by YOLOv5 object detection.
# Standard library
import queue
import time
from threading import Thread

# Third-party
import cv2
import gradio as gr
import numpy as np
import torch
# YOLOv5-small fetched via torch.hub; inference runs on the CPU.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Shared state between the capture thread and the UI: a single-slot queue
# holding the newest processed frame, and a flag that tells the thread to exit.
camera_queue = queue.Queue(maxsize=1)
stop_camera = False
def move_robot(command):
    """
    Simulate sending a movement command to the robot.

    Replace the body with real robot communication (serial/network) when
    integrating with hardware.

    Args:
        command: One of "forward", "backward", "left", "right", "stop".

    Returns:
        "Command sent: <command>" for a known command, or
        "Invalid command: <command>" for anything else.
    """
    commands = {
        "forward": "Robot moving forward",
        "backward": "Robot moving backward",
        "left": "Robot turning left",
        "right": "Robot turning right",
        "stop": "Robot stopped",
    }
    message = commands.get(command)
    if message is None:
        # Bug fix: the original printed "Invalid command" but still returned
        # "Command sent: ..." — surface the error to the UI instead.
        print("Invalid command")
        return f"Invalid command: {command}"
    print(message)
    return f"Command sent: {command}"
def camera_loop():
    """Capture frames, run YOLO detection, and publish them to camera_queue.

    Runs in a background thread until the module-level ``stop_camera`` flag
    is set. The camera handle is always released, even if detection raises.
    """
    global stop_camera
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open camera")
        return
    try:
        while not stop_camera:
            ret, frame = cap.read()
            if ret:
                # YOLOv5 inference; render() draws detection boxes in place.
                results = model(frame)
                processed_frame = results.render()[0]
                # OpenCV delivers BGR; Gradio expects RGB.
                processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
                # Keep only the newest frame in the single-slot queue.
                try:
                    if camera_queue.full():
                        camera_queue.get_nowait()  # drop the stale frame
                    camera_queue.put_nowait(processed_frame)
                except (queue.Empty, queue.Full):
                    # Bug fix: the original caught only queue.Full, so a race
                    # between full() and get_nowait() (consumer drained the
                    # queue in between) raised queue.Empty and killed the
                    # thread. Either race just means: skip this frame.
                    pass
            time.sleep(0.1)  # throttle to ~10 FPS to limit CPU usage
    finally:
        # Guarantee the device is released even if inference raises.
        cap.release()
def start_camera():
    """Start the background camera thread (idempotent).

    Bug fix: every button click previously spawned an additional capture
    thread, all fighting over the same device and queue. Now a new thread is
    started only when none is currently running.

    Returns:
        Status string for the UI.
    """
    global stop_camera, _camera_thread
    stop_camera = False
    existing = globals().get("_camera_thread")
    if existing is None or not existing.is_alive():
        _camera_thread = Thread(target=camera_loop, daemon=True)
        _camera_thread.start()
    return "Camera started"
def stop_camera_feed():
    """Signal the capture thread to exit and blank the video panel.

    Returns:
        Tuple of (status message, black 480x640 RGB frame).
    """
    global stop_camera
    stop_camera = True
    blank = np.zeros((480, 640, 3), dtype=np.uint8)
    return "Camera stopped", blank
def get_current_frame():
    """Return the newest detection frame, or a black frame if none is ready."""
    try:
        return camera_queue.get_nowait()
    except queue.Empty:
        # Nothing captured yet (or feed stopped) -- show a blank 480x640 image.
        return np.zeros((480, 640, 3), dtype=np.uint8)
def robot_interface():
    """Build the Gradio Blocks UI: movement buttons plus a live camera panel.

    Returns:
        The assembled ``gr.Blocks`` interface; call ``.launch()`` on it.
    """
    with gr.Blocks() as interface:
        with gr.Row():
            # Left column: movement controls and status readout.
            with gr.Column(scale=1):
                forward_btn = gr.Button("Forward", variant="primary")
                backward_btn = gr.Button("Backward", variant="primary")
                left_btn = gr.Button("Left", variant="primary")
                right_btn = gr.Button("Right", variant="primary")
                stop_btn = gr.Button("Stop", variant="secondary")
                status_output = gr.Textbox(label="Robot Status")
            # Right column: live camera feed with object detections.
            with gr.Column(scale=2):
                camera_output = gr.Image(
                    label="Robot Camera Feed with Object Detection",
                    type="numpy",
                    height=480,
                    streaming=True,
                )
                start_camera_btn = gr.Button("Start Camera", variant="primary")
                stop_camera_btn = gr.Button("Stop Camera", variant="secondary")

        # Wire each movement button straight to move_robot. Binding the
        # command as a lambda default replaces the original invisible-Textbox
        # hack (which created a hidden component per button) and avoids the
        # late-binding closure pitfall inside the loop.
        movement_buttons = [
            (forward_btn, "forward"),
            (backward_btn, "backward"),
            (left_btn, "left"),
            (right_btn, "right"),
            (stop_btn, "stop"),
        ]
        for button, command in movement_buttons:
            button.click(fn=lambda cmd=command: move_robot(cmd),
                         inputs=None, outputs=status_output)

        # Camera start/stop controls and the streaming hook for the feed.
        start_camera_btn.click(fn=start_camera, inputs=None,
                               outputs=status_output)
        stop_camera_btn.click(fn=stop_camera_feed, inputs=None,
                              outputs=[status_output, camera_output])
        camera_output.stream(fn=get_current_frame)
    return interface
# Entry point: build the UI and serve it (public share link, debug logging).
if __name__ == "__main__":
    app = robot_interface()
    app.launch(share=True, debug=True)