import gradio as gr
import cv2
import torch
import numpy as np
import time
from threading import Thread
import queue

# Load the pretrained YOLOv5s model via torch.hub, pinned to the CPU to match
# the original intent (yolov5's hubconf accepts a device argument)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, device='cpu')

# Global camera state: a one-slot queue always holds only the newest processed
# frame, and stop_camera signals the capture thread to exit
camera_queue = queue.Queue(maxsize=1)
stop_camera = False

def move_robot(command):
    """
    Simulate sending a command to the robot to move.
    Replace this with actual communication code to send commands to the robot.
    """
    commands = {
        "forward": "Robot moving forward",
        "backward": "Robot moving backward",
        "left": "Robot turning left",
        "right": "Robot turning right",
        "stop": "Robot stopped"
    }
    print(commands.get(command, "Invalid command"))
    return f"Command sent: {command}"
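
# A minimal sketch of what real robot communication might look like, assuming
# a serial-connected motor controller (the port name, baud rate, and newline
# protocol here are hypothetical, not part of this project):
#
#     import serial  # pyserial
#     ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
#
#     def move_robot(command):
#         ser.write(f"{command}\n".encode())  # send the command as one text line
#         return f"Command sent: {command}"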

def camera_loop():
    """Background thread that captures frames and runs detection on each one"""
    global stop_camera
    cap = cv2.VideoCapture(0)
    
    if not cap.isOpened():
        print("Error: Could not open camera")
        return
    
    while not stop_camera:
        ret, frame = cap.read()
        if ret:
            # YOLOv5's AutoShape wrapper expects RGB images, so convert the
            # BGR frame from OpenCV before running detection
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = model(frame_rgb)
            # render() draws the detection boxes onto the frame; the result
            # is already RGB and ready for display
            processed_frame = results.render()[0]
            
            # Keep only the newest frame: drop the stale one if the consumer
            # has not picked it up yet
            try:
                if camera_queue.full():
                    camera_queue.get_nowait()
                camera_queue.put_nowait(processed_frame)
            except (queue.Full, queue.Empty):
                pass
        time.sleep(0.1)  # Throttle the loop to limit CPU usage
    
    cap.release()

camera_thread = None  # Guards against opening the camera twice

def start_camera():
    """Start the camera feed in a background thread"""
    global stop_camera, camera_thread
    if camera_thread and camera_thread.is_alive():
        return "Camera already running"
    stop_camera = False
    camera_thread = Thread(target=camera_loop, daemon=True)
    camera_thread.start()
    return "Camera started"

# Cache the most recent frame so the display does not flash black whenever a
# poll fires between camera updates
last_frame = np.zeros((480, 640, 3), dtype=np.uint8)

def stop_camera_feed():
    """Stop the camera feed and blank the display"""
    global stop_camera, last_frame
    stop_camera = True
    last_frame = np.zeros((480, 640, 3), dtype=np.uint8)
    return "Camera stopped", last_frame

def get_current_frame():
    """Return the newest processed frame, or the previous one if none is ready"""
    global last_frame
    try:
        last_frame = camera_queue.get_nowait()
    except queue.Empty:
        pass
    return last_frame

def robot_interface():
    with gr.Blocks() as interface:
        with gr.Row():
            # Left column for controls
            with gr.Column(scale=1):
                # Control buttons
                forward_btn = gr.Button("Forward", variant="primary")
                backward_btn = gr.Button("Backward", variant="primary")
                left_btn = gr.Button("Left", variant="primary")
                right_btn = gr.Button("Right", variant="primary")
                stop_btn = gr.Button("Stop", variant="secondary")
                status_output = gr.Textbox(label="Robot Status")

            # Right column for camera feed
            with gr.Column(scale=2):
                # Camera feed output
                camera_output = gr.Image(
                    label="Robot Camera Feed with Object Detection",
                    type="numpy",
                    height=480
                )
                
                # Start/Stop buttons for camera feed
                start_camera_btn = gr.Button("Start Camera", variant="primary")
                stop_camera_btn = gr.Button("Stop Camera", variant="secondary")
                
        # Wire each movement button to move_robot with a fixed command,
        # instead of routing the string through hidden Textbox components
        forward_btn.click(fn=lambda: move_robot("forward"), outputs=status_output)
        backward_btn.click(fn=lambda: move_robot("backward"), outputs=status_output)
        left_btn.click(fn=lambda: move_robot("left"), outputs=status_output)
        right_btn.click(fn=lambda: move_robot("right"), outputs=status_output)
        stop_btn.click(fn=lambda: move_robot("stop"), outputs=status_output)
        
        # Set up button click events for camera control
        start_camera_btn.click(fn=start_camera, inputs=None, outputs=status_output)
        stop_camera_btn.click(fn=stop_camera_feed, inputs=None, outputs=[status_output, camera_output])
        
        # Poll the camera queue and push the newest frame to the display.
        # gr.Timer needs Gradio 4.38+; on older versions, the equivalent is
        # interface.load(get_current_frame, None, camera_output, every=0.1)
        timer = gr.Timer(0.1)
        timer.tick(fn=get_current_frame, outputs=camera_output)

    return interface

# Launch the Gradio interface
if __name__ == "__main__":
    robot_interface().launch(share=True, debug=True)