Anupam251272 committed · verified
Commit 7508d11 · 1 Parent(s): f171576

Update app.py

Files changed (1)
  1. app.py +68 -50
app.py CHANGED
@@ -3,65 +3,81 @@ import cv2
 import torch
 import numpy as np
 import time
+from threading import Thread
+import queue
 
 # Load the YOLO model on the CPU
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
 
+# Global variables for camera control
+camera_queue = queue.Queue(maxsize=1)
+stop_camera = False
+
 def move_robot(command):
     """
     Simulate sending a command to the robot to move.
     Replace this with actual communication code to send commands to the robot.
     """
-    if command == "forward":
-        print("Robot moving forward")
-    elif command == "backward":
-        print("Robot moving backward")
-    elif command == "left":
-        print("Robot turning left")
-    elif command == "right":
-        print("Robot turning right")
-    elif command == "stop":
-        print("Robot stopped")
-
+    commands = {
+        "forward": "Robot moving forward",
+        "backward": "Robot moving backward",
+        "left": "Robot turning left",
+        "right": "Robot turning right",
+        "stop": "Robot stopped"
+    }
+    print(commands.get(command, "Invalid command"))
     return f"Command sent: {command}"
 
-def process_camera_feed():
-    """Function to process single frame and run object detection"""
-    try:
-        # Initialize camera
-        cap = cv2.VideoCapture(0)
-
-        # Check if camera opened successfully
-        if not cap.isOpened():
-            print("Error: Could not open camera")
-            return np.zeros((480, 640, 3), dtype=np.uint8)
-
-        # Read a frame
+def camera_loop():
+    """Background thread function to continuously capture frames"""
+    global stop_camera
+    cap = cv2.VideoCapture(0)
+
+    if not cap.isOpened():
+        print("Error: Could not open camera")
+        return
+
+    while not stop_camera:
         ret, frame = cap.read()
-
         if ret:
             # Run YOLO detection
             results = model(frame)
             processed_frame = results.render()[0]
             # Convert BGR to RGB for display
             processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
-        else:
-            processed_frame = np.zeros((480, 640, 3), dtype=np.uint8)
 
-        # Release the camera
-        cap.release()
-
-        return processed_frame
-
-    except Exception as e:
-        print(f"Error processing camera feed: {e}")
-        return np.zeros((480, 640, 3), dtype=np.uint8)
+            # Update the queue with the latest frame
+            try:
+                if camera_queue.full():
+                    camera_queue.get_nowait()  # Remove old frame
+                camera_queue.put_nowait(processed_frame)
+            except queue.Full:
+                pass
+        time.sleep(0.1)  # Prevent excessive CPU usage
+
+    cap.release()
+
+def start_camera():
+    """Start the camera feed"""
+    global stop_camera
+    stop_camera = False
+    Thread(target=camera_loop, daemon=True).start()
+    return "Camera started"
 
-def continuously_update_feed():
-    """Function to continuously update the camera feed"""
-    while True:
-        yield process_camera_feed()
-        time.sleep(0.1)  # 100ms delay between updates
+def stop_camera_feed():
+    """Stop the camera feed"""
+    global stop_camera
+    stop_camera = True
+    empty_frame = np.zeros((480, 640, 3), dtype=np.uint8)
+    return "Camera stopped", empty_frame
+
+def get_current_frame():
+    """Get the latest frame from the camera queue"""
+    try:
+        frame = camera_queue.get_nowait()
+        return frame
+    except queue.Empty:
+        return np.zeros((480, 640, 3), dtype=np.uint8)
 
 def robot_interface():
     with gr.Blocks() as interface:
@@ -82,28 +98,30 @@ def robot_interface():
         camera_output = gr.Image(
             label="Robot Camera Feed with Object Detection",
             type="numpy",
-            height=480
+            height=480,
+            streaming=True
         )
 
-        # Update button for camera
-        update_btn = gr.Button("Start Camera", variant="primary")
+        # Start/Stop buttons for camera feed
+        start_camera_btn = gr.Button("Start Camera", variant="primary")
+        stop_camera_btn = gr.Button("Stop Camera", variant="secondary")
 
-        # Set up button click events
+        # Set up button click events for robot movement
         forward_btn.click(fn=move_robot, inputs=[gr.Textbox(value="forward", visible=False)], outputs=status_output)
         backward_btn.click(fn=move_robot, inputs=[gr.Textbox(value="backward", visible=False)], outputs=status_output)
         left_btn.click(fn=move_robot, inputs=[gr.Textbox(value="left", visible=False)], outputs=status_output)
         right_btn.click(fn=move_robot, inputs=[gr.Textbox(value="right", visible=False)], outputs=status_output)
         stop_btn.click(fn=move_robot, inputs=[gr.Textbox(value="stop", visible=False)], outputs=status_output)
 
-        # Set up camera update
-        update_btn.click(
-            fn=process_camera_feed,
-            inputs=None,
-            outputs=camera_output
-        )
+        # Set up button click events for camera control
+        start_camera_btn.click(fn=start_camera, inputs=None, outputs=status_output)
+        stop_camera_btn.click(fn=stop_camera_feed, inputs=None, outputs=[status_output, camera_output])
+
+        # Stream the camera feed
+        camera_output.stream(fn=get_current_frame)
 
     return interface
 
 # Launch the Gradio interface
 if __name__ == "__main__":
-    robot_interface().launch(share=True, debug=True)
+    robot_interface().launch(share=True, debug=True)
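
The substantive change in this commit is the move from per-click, single-frame capture to a background producer thread that publishes into a size-1 queue, dropping the stale frame so the UI always reads the newest one. Below is a minimal, self-contained sketch of that drop-oldest hand-off pattern with the camera and YOLO pieces replaced by stand-ins; the names producer and latest_frame are illustrative, not from the commit, and threading.Event stands in for the commit's bare stop_camera global.

import queue
import threading
import time

frame_queue = queue.Queue(maxsize=1)  # holds only the newest "frame"
stop_flag = threading.Event()         # sketch's stand-in for stop_camera

def producer():
    """Stand-in for camera_loop: publish a new item ~10x per second."""
    n = 0
    while not stop_flag.is_set():
        n += 1
        try:
            if frame_queue.full():
                frame_queue.get_nowait()  # drop the stale frame
            frame_queue.put_nowait(f"frame-{n}")
        except (queue.Full, queue.Empty):
            pass  # benign races; there is only one producer here
        time.sleep(0.1)

def latest_frame(default="no frame yet"):
    """Stand-in for get_current_frame: never blocks the caller."""
    try:
        return frame_queue.get_nowait()
    except queue.Empty:
        return default

threading.Thread(target=producer, daemon=True).start()
time.sleep(0.35)
print(latest_frame())  # e.g. "frame-3"; older frames were discarded
stop_flag.set()

The non-blocking get_nowait/put_nowait pair is what keeps the UI callback from ever waiting on the camera thread; a plain shared variable would also work for one writer and one reader, but the size-1 queue makes the ownership hand-off explicit.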