# app.py — Gradio control panel for a robot with a YOLOv5-annotated camera feed.
# (Header of the scraped Hugging Face Spaces "Create app.py" page removed.)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import cv2
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
import time
|
6 |
+
|
7 |
+
# Load the YOLOv5 "small" model once at import time via torch.hub.
# NOTE(review): this fetches the ultralytics/yolov5 repo and weights on first
# run — requires network access. The original comment says "on the CPU", but
# nothing here forces a device; confirm whether CPU-only is intended.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
9 |
+
|
10 |
+
def move_robot(command):
    """
    Simulate sending a movement command to the robot.

    Replace the ``print`` calls with real communication code (serial port,
    socket, ROS topic, ...) to drive actual hardware.

    Args:
        command: One of ``"forward"``, ``"backward"``, ``"left"``,
            ``"right"`` or ``"stop"``.

    Returns:
        ``"Command sent: <command>"`` on success, or
        ``"Unknown command: <command>"`` for an unrecognized command.
    """
    # Dispatch table keeps the command -> action mapping in one place.
    actions = {
        "forward": "Robot moving forward",
        "backward": "Robot moving backward",
        "left": "Robot turning left",
        "right": "Robot turning right",
        "stop": "Robot stopped",
    }
    message = actions.get(command)
    if message is None:
        # BUG FIX: the original if/elif chain silently accepted any string
        # and still reported "Command sent"; surface bad commands instead.
        return f"Unknown command: {command}"
    print(message)
    return f"Command sent: {command}"
|
27 |
+
|
28 |
+
def process_camera_feed():
    """Capture one frame from the default camera and run YOLO detection on it.

    Returns:
        An RGB ``numpy`` array with detection boxes rendered, or a black
        480x640 frame if the camera is unavailable, the read fails, or any
        error occurs during processing.
    """
    blank = np.zeros((480, 640, 3), dtype=np.uint8)
    cap = None
    try:
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            print("Error: Could not open camera")
            return blank

        ret, frame = cap.read()
        if not ret:
            return blank

        # Run YOLO detection; render() draws the boxes onto the frame.
        results = model(frame)
        processed_frame = results.render()[0]
        # OpenCV delivers BGR; Gradio's Image component expects RGB.
        return cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
    except Exception as e:
        print(f"Error processing camera feed: {e}")
        return blank
    finally:
        # BUG FIX: the original released the capture only on the success
        # path, leaking the device whenever model() or cvtColor raised.
        if cap is not None:
            cap.release()
|
59 |
+
|
60 |
+
def continuously_update_feed():
    """Generator yielding freshly processed camera frames indefinitely."""
    delay_seconds = 0.1  # throttle updates to roughly 10 fps
    while True:
        frame = process_camera_feed()
        yield frame
        time.sleep(delay_seconds)
|
65 |
+
|
66 |
+
def robot_interface():
    """Build the Gradio Blocks UI: movement buttons plus a camera view.

    Returns:
        The constructed ``gr.Blocks`` interface (not yet launched).
    """
    with gr.Blocks() as interface:
        with gr.Row():
            # Left column: movement controls and status readout.
            with gr.Column(scale=1):
                forward_btn = gr.Button("Forward", variant="primary")
                backward_btn = gr.Button("Backward", variant="primary")
                left_btn = gr.Button("Left", variant="primary")
                right_btn = gr.Button("Right", variant="primary")
                stop_btn = gr.Button("Stop", variant="secondary")
                status_output = gr.Textbox(label="Robot Status")

            # Right column: detection-annotated camera frame.
            with gr.Column(scale=2):
                camera_output = gr.Image(
                    label="Robot Camera Feed with Object Detection",
                    type="numpy",
                    height=480,
                )
                update_btn = gr.Button("Start Camera", variant="primary")

        # IMPROVEMENT: the original created hidden gr.Textbox components
        # solely to feed a constant string into move_robot; bind the constant
        # via a default-argument lambda instead (no phantom UI components,
        # and no late-binding closure bug in the loop).
        for btn, cmd in (
            (forward_btn, "forward"),
            (backward_btn, "backward"),
            (left_btn, "left"),
            (right_btn, "right"),
            (stop_btn, "stop"),
        ):
            btn.click(fn=lambda c=cmd: move_robot(c), inputs=None, outputs=status_output)

        # Grab and display a single processed frame on demand.
        update_btn.click(fn=process_camera_feed, inputs=None, outputs=camera_output)

    return interface
|
106 |
+
|
107 |
+
# Launch the Gradio interface when executed as a script.
if __name__ == "__main__":
    app = robot_interface()
    # share=True exposes a public tunnel URL; debug=True echoes errors to the console.
    app.launch(share=True, debug=True)
|