file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
---|---|---|---|---|---|---|
superboySB/SBDrone_deprecated/examples/3_ros2_single_vehicle.py | #!/usr/bin/env python
"""
| File: 3_ros2_single_vehicle.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to build an app that makes use of the Pegasus API to run a
simulation with a single vehicle, controlled using the ROS2 backend system. NOTE: this ROS2 interface only works on Ubuntu 20.04 LTS
for now. Check the website https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_ros.html#enabling-the-ros-ros-2-bridge-extension
and follow steps 1, 2 and 3 to make sure that the ROS2 example runs properly.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.ros2_backend import ROS2Backend
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Auxiliary scipy and numpy modules
from scipy.spatial.transform import Rotation
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Launch one of the worlds provided by NVIDIA
self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
# Create the vehicle
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor = MultirotorConfig()
config_multirotor.backends = [ROS2Backend(vehicle_id=1)]
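        # Note: the ROS2 backend is expected to expose the vehicle's state and accept control references over
        # ROS 2 topics associated with this vehicle_id (the exact topic names depend on the ROS2Backend implementation).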
Multirotor(
"/World/quadrotor",
ROBOTS['Iris'],
0,
[0.0, 0.0, 0.07],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor,
)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
        # Auxiliary variable for the timeline callback example
self.stop_sim = False
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running() and not self.stop_sim:
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main()
| 4,049 | Python | 35.818181 | 143 | 0.679674 |
superboySB/SBDrone_deprecated/examples/5_python_multi_vehicle.py | #!/usr/bin/env python
"""
| File: 5_python_multi_vehicle.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 or ROS.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Used for adding extra lights to the environment
import omni.isaac.core.utils.prims as prim_utils
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Import the custom python control backend
from utils.nonlinear_controller import NonlinearController
# Auxiliary scipy and numpy modules
import numpy as np
from scipy.spatial.transform import Rotation
# Use os and pathlib for parsing the desired trajectory from a CSV file
import os
from pathlib import Path
import random
from omni.isaac.debug_draw import _debug_draw
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Add a custom light with a high-definition HDR surround environment of an exhibition hall,
# instead of the typical ground plane
prim_utils.create_prim(
"/World/Light/DomeLight",
"DomeLight",
attributes={
"texture:file": "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr",
"intensity": 1000.0
})
# Get the current directory used to read trajectories and save results
self.curr_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve())
# Create the vehicle 1
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor1 = MultirotorConfig()
config_multirotor1.backends = [NonlinearController(
trajectory_file=self.curr_dir + "/trajectories/pitch_relay_90_deg_1.csv",
results_file=self.curr_dir + "/results/statistics_1.npz",
Ki=[0.5, 0.5, 0.5],
Kr=[2.0, 2.0, 2.0])]
Multirotor(
"/World/quadrotor1",
ROBOTS['Iris'],
1,
[0,-1.5, 8.0],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor1,
)
# Create the vehicle 2
        # Try to spawn the selected robot in the world to the specified namespace
config_multirotor2 = MultirotorConfig()
config_multirotor2.backends = [NonlinearController(
trajectory_file=self.curr_dir + "/trajectories/pitch_relay_90_deg_2.csv",
results_file=self.curr_dir + "/results/statistics_2.npz",
Ki=[0.5, 0.5, 0.5],
Kr=[2.0, 2.0, 2.0])]
Multirotor(
"/World/quadrotor2",
ROBOTS['Iris'],
2,
[2.3,-1.5, 8.0],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor2,
)
# Set the camera to a nice position so that we can see the 2 drones almost touching each other
self.pg.set_viewport_camera([7.53, -1.6, 4.96], [0.0, 3.3, 7.0])
# Read the trajectories and plot them inside isaac sim
trajectory1 = np.flip(np.genfromtxt(self.curr_dir + "/trajectories/pitch_relay_90_deg_1.csv", delimiter=','), axis=0)
num_samples1,_ = trajectory1.shape
trajectory2 = np.flip(np.genfromtxt(self.curr_dir + "/trajectories/pitch_relay_90_deg_2.csv", delimiter=','), axis=0)
num_samples2,_ = trajectory2.shape
# Draw the lines of the desired trajectory in Isaac Sim with the same color as the output plots for the paper
draw = _debug_draw.acquire_debug_draw_interface()
point_list_1 = [(trajectory1[i,1], trajectory1[i,2], trajectory1[i,3]) for i in range(num_samples1)]
draw.draw_lines_spline(point_list_1, (31/255, 119/255, 180/255, 1), 5, False)
point_list_2 = [(trajectory2[i,1], trajectory2[i,2], trajectory2[i,3]) for i in range(num_samples2)]
draw.draw_lines_spline(point_list_2, (255/255, 0, 0, 1), 5, False)
self.world.reset()
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running():
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main() | 6,397 | Python | 37.775757 | 125 | 0.647647 |
superboySB/SBDrone_deprecated/examples/4_python_single_vehicle.py | #!/usr/bin/env python
"""
| File: 4_python_single_vehicle.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 or ROS.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Import the custom python control backend
from utils.nonlinear_controller import NonlinearController
# Auxiliary scipy and numpy modules
from scipy.spatial.transform import Rotation
# Use os and pathlib for parsing the desired trajectory from a CSV file
import os
from pathlib import Path
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Launch one of the worlds provided by NVIDIA
self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
# Get the current directory used to read trajectories and save results
self.curr_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve())
# Create the vehicle 1
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor1 = MultirotorConfig()
config_multirotor1.backends = [NonlinearController(
trajectory_file=self.curr_dir + "/trajectories/pitch_relay_90_deg_2.csv",
results_file=self.curr_dir + "/results/single_statistics.npz",
Ki=[0.5, 0.5, 0.5],
Kr=[2.0, 2.0, 2.0]
)]
Multirotor(
"/World/quadrotor1",
ROBOTS['Iris'],
0,
[2.3, -1.5, 0.07],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor1,
)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running():
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main()
| 4,221 | Python | 34.478991 | 121 | 0.666667 |
superboySB/SBDrone_deprecated/examples/0_template_app.py | #!/usr/bin/env python
"""
| File: 0_template_app.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as a template for how to build a clean and simple Isaac Sim based standalone App.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core import World
class Template:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the template App and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.world = World()
# Create a ground plane for the simulation
self.world.scene.add_default_ground_plane()
# Create an example physics callback
self.world.add_physics_callback('template_physics_callback', self.physics_callback)
# Create an example render callback
self.world.add_render_callback('template_render_callback', self.render_callback)
# Create an example timeline callback
self.world.add_timeline_callback('template_timeline_callback', self.timeline_callback)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
        # Auxiliary variable for the timeline callback example
self.stop_sim = False
def physics_callback(self, dt: float):
"""An example physics callback. It will get invoked every physics step.
Args:
dt (float): The time difference between the previous and current function call, in seconds.
"""
carb.log_info("This is a physics callback. It is called every " + str(dt) + " seconds!")
def render_callback(self, data):
"""An example render callback. It will get invoked for every rendered frame.
Args:
data: Rendering data.
"""
carb.log_info("This is a render callback. It is called every frame!")
def timeline_callback(self, timeline_event):
"""An example timeline callback. It will get invoked every time a timeline event occurs. In this example,
we will check if the event is for a 'simulation stop'. If so, we will attempt to close the app
Args:
timeline_event: A timeline event
"""
if self.world.is_stopped():
self.stop_sim = True
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running() and not self.stop_sim:
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("Template Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
template_app = Template()
# Run the application loop
template_app.run()
if __name__ == "__main__":
main()
| 4,001 | Python | 34.105263 | 121 | 0.652587 |
superboySB/SBDrone_deprecated/examples/8_camera_vehicle.py | #!/usr/bin/env python
"""
| File: 8_camera_vehicle.py
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to build an app that makes use of the Pegasus API to run a simulation
with a single vehicle equipped with a camera, producing rgb and camera_info ROS2 Humble topics.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
# Enable/disable ROS bridge extensions to keep only ROS2 Humble Bridge
disable_extension("omni.isaac.ros_bridge")
disable_extension("omni.isaac.ros2_bridge")
enable_extension("omni.isaac.ros2_bridge-humble")
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.graphs import ROS2Camera, ROS2Tf
from pegasus.simulator.logic.sensors import Camera
# Auxiliary scipy and numpy modules
from scipy.spatial.transform import Rotation
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Launch one of the worlds provided by NVIDIA
self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
# Create the vehicle
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor = MultirotorConfig()
# Create the multirotor configuration
mavlink_config = MavlinkBackendConfig({
"vehicle_id": 0,
"px4_autolaunch": True,
"px4_dir": "/home/fstec/Projects/PX4-Autopilot",
"px4_vehicle_model": 'iris'
})
config_multirotor.backends = [MavlinkBackend(mavlink_config)]
# Create Camera sensor on top of the existing camera prim and change its parameters
camera_prim_path = "body/Camera"
camera_config = {
"position": [0.1, 0.0, 0.0],
"orientation": Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
"focal_length": 16.0,
"overwrite_params": True
}
config_multirotor.sensors += [Camera(camera_prim_path, config=camera_config)]
# Create camera graph for the existing Camera prim on the Iris model, which can be found
# at the prim path `/World/quadrotor/body/Camera`. The camera prim path is the local path from the vehicle's prim path
# to the camera prim, to which this graph will be connected. All ROS2 topics published by this graph will have
# namespace `quadrotor` and frame_id `Camera` followed by the selected camera types (`rgb`, `camera_info`).
config_multirotor.graphs = [ROS2Camera(camera_prim_path, config={"types": ['rgb', 'camera_info']}), ROS2Tf()]
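        # (Hence the rgb and camera_info streams end up under the `quadrotor` namespace; `ros2 topic list` can be
        # used to check the exact topic names produced by the graph.)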
Multirotor(
"/World/quadrotor",
ROBOTS['Iris'],
0,
[0.0, 0.0, 0.07],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor,
)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
        # Auxiliary variable for the timeline callback example
self.stop_sim = False
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running() and not self.stop_sim:
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main()
| 5,471 | Python | 38.941606 | 126 | 0.672272 |
superboySB/SBDrone_deprecated/examples/2_px4_multi_vehicle.py | #!/usr/bin/env python
"""
| File: 2_px4_multi_vehicle.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to build an app that makes use of the Pegasus API to run a simulation with multiple vehicles, controlled using the MAVLink control backend.
"""
# Imports to start Isaac Sim from this script
import carb
from omni.isaac.kit import SimulationApp
# Start Isaac Sim's simulation environment
# Note: this simulation app must be instantiated right after the SimulationApp import, otherwise the simulator will crash
# as this is the object that will load all the extensions and load the actual simulator.
simulation_app = SimulationApp({"headless": False})
# -----------------------------------
# The actual script should start here
# -----------------------------------
import omni.timeline
from omni.isaac.core.world import World
# Import the Pegasus API for simulating drones
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Auxiliary scipy and numpy modules
from scipy.spatial.transform import Rotation
class PegasusApp:
"""
A Template class that serves as an example on how to build a simple Isaac Sim standalone App.
"""
def __init__(self):
"""
Method that initializes the PegasusApp and is used to setup the simulation environment.
"""
# Acquire the timeline that will be used to start/stop the simulation
self.timeline = omni.timeline.get_timeline_interface()
# Start the Pegasus Interface
self.pg = PegasusInterface()
        # Acquire the World, i.e., the singleton that controls the simulation and serves as a one-stop shop for
        # setting up physics, spawning asset primitives, etc.
self.pg._world = World(**self.pg._world_settings)
self.world = self.pg.world
# Launch one of the worlds provided by NVIDIA
self.pg.load_environment(SIMULATION_ENVIRONMENTS["Curved Gridroom"])
# Spawn 5 vehicles with the PX4 control backend in the simulation, separated by 1.0 m along the x-axis
for i in range(5):
self.vehicle_factory(i, gap_x_axis=1.0)
# Reset the simulation environment so that all articulations (aka robots) are initialized
self.world.reset()
        # Auxiliary variable for the timeline callback example
self.stop_sim = False
def vehicle_factory(self, vehicle_id: int, gap_x_axis: float):
"""Auxiliar method to create multiple multirotor vehicles
Args:
vehicle_id (_type_): _description_
"""
# Create the vehicle
# Try to spawn the selected robot in the world to the specified namespace
config_multirotor = MultirotorConfig()
# Create the multirotor configuration
mavlink_config = MavlinkBackendConfig({"vehicle_id": vehicle_id, "px4_autolaunch": True, "px4_dir": "/home/marcelo/PX4-Autopilot", "px4_vehicle_model": 'iris'})
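        # Presumably, giving each vehicle a distinct vehicle_id is what lets px4_autolaunch start one PX4 instance
        # per vehicle and keep their MAVLink connections separate.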
config_multirotor.backends = [MavlinkBackend(mavlink_config)]
Multirotor(
"/World/quadrotor",
ROBOTS['Iris'],
vehicle_id,
[gap_x_axis * vehicle_id, 0.0, 0.07],
Rotation.from_euler("XYZ", [0.0, 0.0, 0.0], degrees=True).as_quat(),
config=config_multirotor)
def run(self):
"""
Method that implements the application main loop, where the physics steps are executed.
"""
# Start the simulation
self.timeline.play()
# The "infinite" loop
while simulation_app.is_running() and not self.stop_sim:
# Update the UI of the app and perform the physics step
self.world.step(render=True)
# Cleanup and stop
carb.log_warn("PegasusApp Simulation App is closing.")
self.timeline.stop()
simulation_app.close()
def main():
# Instantiate the template app
pg_app = PegasusApp()
# Run the application loop
pg_app.run()
if __name__ == "__main__":
main()
| 4,454 | Python | 35.818182 | 193 | 0.668388 |
superboySB/SBDrone_deprecated/examples/utils/nonlinear_controller.py | #!/usr/bin/env python
"""
| File: nonlinear_controller.py
| Author: Marcelo Jacinto and Joao Pinto ([email protected], [email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: This file serves as an example of how to use the control backends API to create a custom controller
for the vehicle from scratch and use it to perform a simulation, without using PX4 or ROS. In this controller, we
provide a quick way of following a given trajectory specified in CSV files or tracking a hard-coded trajectory based
on exponentials. NOTE: This is just an example to demonstrate the potential of the API; a much more flexible solution
can be achieved.
"""
# Imports to be able to log to the terminal with fancy colors
import carb
# Imports from the Pegasus library
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends import Backend
# Auxiliary scipy and numpy modules
import numpy as np
from scipy.spatial.transform import Rotation
class NonlinearController(Backend):
"""A nonlinear controller class. It implements a nonlinear controller that allows a vehicle to track
    aggressive trajectories. This controller is well described in the papers
[1] J. Pinto, B. J. Guerreiro and R. Cunha, "Planning Parcel Relay Manoeuvres for Quadrotors,"
2021 International Conference on Unmanned Aircraft Systems (ICUAS), Athens, Greece, 2021,
pp. 137-145, doi: 10.1109/ICUAS51884.2021.9476757.
[2] D. Mellinger and V. Kumar, "Minimum snap trajectory generation and control for quadrotors,"
2011 IEEE International Conference on Robotics and Automation, Shanghai, China, 2011,
pp. 2520-2525, doi: 10.1109/ICRA.2011.5980409.
"""
def __init__(self,
trajectory_file: str = None,
results_file: str=None,
reverse=False,
Kp=[10.0, 10.0, 10.0],
Kd=[8.5, 8.5, 8.5],
Ki=[1.50, 1.50, 1.50],
Kr=[3.5, 3.5, 3.5],
Kw=[0.5, 0.5, 0.5]):
# The current rotor references [rad/s]
self.input_ref = [0.0, 0.0, 0.0, 0.0]
# The current state of the vehicle expressed in the inertial frame (in ENU)
self.p = np.zeros((3,)) # The vehicle position
self.R: Rotation = Rotation.identity() # The vehicle attitude
self.w = np.zeros((3,)) # The angular velocity of the vehicle
self.v = np.zeros((3,)) # The linear velocity of the vehicle in the inertial frame
self.a = np.zeros((3,)) # The linear acceleration of the vehicle in the inertial frame
# Define the control gains matrix for the outer-loop
self.Kp = np.diag(Kp)
self.Kd = np.diag(Kd)
self.Ki = np.diag(Ki)
self.Kr = np.diag(Kr)
self.Kw = np.diag(Kw)
self.int = np.array([0.0, 0.0, 0.0])
# Define the dynamic parameters for the vehicle
self.m = 1.50 # Mass in Kg
self.g = 9.81 # The gravity acceleration ms^-2
# Read the target trajectory from a CSV file inside the trajectories directory
# if a trajectory is provided. Otherwise, just perform the hard-coded trajectory provided with this controller
if trajectory_file is not None:
self.trajectory = self.read_trajectory_from_csv(trajectory_file)
self.index = 0
self.max_index, _ = self.trajectory.shape
self.total_time = 0.0
# Use the built-in trajectory hard-coded for this controller
else:
# Set the initial time for starting when using the built-in trajectory (the time is also used in this case
# as the parametric value)
self.total_time = -5.0
# Signal that we will not used a received trajectory
self.trajectory = None
self.max_index = 1
self.reverse = reverse
        # Auxiliary variable, so that we only start sending motor commands once we get the state of the vehicle
self.reveived_first_state = False
# Lists used for analysing performance statistics
self.results_files = results_file
self.time_vector = []
self.desired_position_over_time = []
self.position_over_time = []
self.position_error_over_time = []
self.velocity_error_over_time = []
self.atittude_error_over_time = []
self.attitude_rate_error_over_time = []
def read_trajectory_from_csv(self, file_name: str):
"""Auxiliar method used to read the desired trajectory from a CSV file
Args:
file_name (str): A string with the name of the trajectory inside the trajectories directory
Returns:
np.ndarray: A numpy matrix with the trajectory desired states over time
"""
        # Read the trajectory into a numpy array, flipped so that the samples are ordered by increasing time
return np.flip(np.genfromtxt(file_name, delimiter=','), axis=0)
def start(self):
"""
Reset the control and trajectory index
"""
self.reset_statistics()
def stop(self):
"""
Stopping the controller. Saving the statistics data for plotting later
"""
# Check if we should save the statistics to some file or not
if self.results_files is None:
return
statistics = {}
statistics["time"] = np.array(self.time_vector)
statistics["p"] = np.vstack(self.position_over_time)
statistics["desired_p"] = np.vstack(self.desired_position_over_time)
statistics["ep"] = np.vstack(self.position_error_over_time)
statistics["ev"] = np.vstack(self.velocity_error_over_time)
statistics["er"] = np.vstack(self.atittude_error_over_time)
statistics["ew"] = np.vstack(self.attitude_rate_error_over_time)
np.savez(self.results_files, **statistics)
carb.log_warn("Statistics saved to: " + self.results_files)
self.reset_statistics()
def update_sensor(self, sensor_type: str, data):
"""
Do nothing. For now ignore all the sensor data and just use the state directly for demonstration purposes.
This is a callback that is called at every physics step.
Args:
sensor_type (str): The name of the sensor providing the data
data (dict): A dictionary that contains the data produced by the sensor
"""
pass
def update_state(self, state: State):
"""
Method that updates the current state of the vehicle. This is a callback that is called at every physics step
Args:
state (State): The current state of the vehicle.
"""
self.p = state.position
self.R = Rotation.from_quat(state.attitude)
self.w = state.angular_velocity
self.v = state.linear_velocity
self.reveived_first_state = True
def input_reference(self):
"""
Method that is used to return the latest target angular velocities to be applied to the vehicle
Returns:
A list with the target angular velocities for each individual rotor of the vehicle
"""
return self.input_ref
def update(self, dt: float):
"""Method that implements the nonlinear control law and updates the target angular velocities for each rotor.
This method will be called by the simulation on every physics step
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
if self.reveived_first_state == False:
return
# -------------------------------------------------
# Update the references for the controller to track
# -------------------------------------------------
self.total_time += dt
# Check if we need to update to the next trajectory index
if self.index < self.max_index - 1 and self.total_time >= self.trajectory[self.index + 1, 0]:
self.index += 1
# Update using an external trajectory
if self.trajectory is not None:
# the target positions [m], velocity [m/s], accelerations [m/s^2], jerk [m/s^3], yaw-angle [rad], yaw-rate [rad/s]
p_ref = np.array([self.trajectory[self.index, 1], self.trajectory[self.index, 2], self.trajectory[self.index, 3]])
v_ref = np.array([self.trajectory[self.index, 4], self.trajectory[self.index, 5], self.trajectory[self.index, 6]])
a_ref = np.array([self.trajectory[self.index, 7], self.trajectory[self.index, 8], self.trajectory[self.index, 9]])
j_ref = np.array([self.trajectory[self.index, 10], self.trajectory[self.index, 11], self.trajectory[self.index, 12]])
yaw_ref = self.trajectory[self.index, 13]
yaw_rate_ref = self.trajectory[self.index, 14]
# Or update the reference using the built-in trajectory
else:
s = 0.6
p_ref = self.pd(self.total_time, s, self.reverse)
v_ref = self.d_pd(self.total_time, s, self.reverse)
a_ref = self.dd_pd(self.total_time, s, self.reverse)
j_ref = self.ddd_pd(self.total_time, s, self.reverse)
yaw_ref = self.yaw_d(self.total_time, s)
yaw_rate_ref = self.d_yaw_d(self.total_time, s)
# -------------------------------------------------
# Start the controller implementation
# -------------------------------------------------
# Compute the tracking errors
ep = self.p - p_ref
ev = self.v - v_ref
self.int = self.int + (ep * dt)
ei = self.int
# Compute F_des term
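        # F_des = -Kp @ ep - Kd @ ev - Ki @ ei + m*g*e3 + m*a_ref: PD feedback on the position/velocity errors,
        # integral action, gravity compensation and the feed-forward acceleration of the reference (cf. [2]).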
F_des = -(self.Kp @ ep) - (self.Kd @ ev) - (self.Ki @ ei) + np.array([0.0, 0.0, self.m * self.g]) + (self.m * a_ref)
# Get the current axis Z_B (given by the last column of the rotation matrix)
Z_B = self.R.as_matrix()[:,2]
# Get the desired total thrust in Z_B direction (u_1)
u_1 = F_des @ Z_B
# Compute the desired body-frame axis Z_b
Z_b_des = F_des / np.linalg.norm(F_des)
# Compute X_C_des
X_c_des = np.array([np.cos(yaw_ref), np.sin(yaw_ref), 0.0])
# Compute Y_b_des
Z_b_cross_X_c = np.cross(Z_b_des, X_c_des)
Y_b_des = Z_b_cross_X_c / np.linalg.norm(Z_b_cross_X_c)
# Compute X_b_des
X_b_des = np.cross(Y_b_des, Z_b_des)
# Compute the desired rotation R_des = [X_b_des | Y_b_des | Z_b_des]
R_des = np.c_[X_b_des, Y_b_des, Z_b_des]
R = self.R.as_matrix()
# Compute the rotation error
e_R = 0.5 * self.vee((R_des.T @ R) - (R.T @ R_des))
# Compute an approximation of the current vehicle acceleration in the inertial frame (since we cannot measure it directly)
self.a = (u_1 * Z_B) / self.m - np.array([0.0, 0.0, self.g])
# Compute the desired angular velocity by projecting the angular velocity in the Xb-Yb plane
# projection of angular velocity on xB − yB plane
# see eqn (7) from [2].
hw = (self.m / u_1) * (j_ref - np.dot(Z_b_des, j_ref) * Z_b_des)
# desired angular velocity
w_des = np.array([-np.dot(hw, Y_b_des),
np.dot(hw, X_b_des),
yaw_rate_ref * Z_b_des[2]])
# Compute the angular velocity error
e_w = self.w - w_des
# Compute the torques to apply on the rigid body
tau = -(self.Kr @ e_R) - (self.Kw @ e_w)
# Use the allocation matrix provided by the Multirotor vehicle to convert the desired force and torque
# to angular velocity [rad/s] references to give to each rotor
if self.vehicle:
self.input_ref = self.vehicle.force_and_torques_to_velocities(u_1, tau)
# ----------------------------
# Statistics to save for later
# ----------------------------
self.time_vector.append(self.total_time)
self.position_over_time.append(self.p)
self.desired_position_over_time.append(p_ref)
self.position_error_over_time.append(ep)
self.velocity_error_over_time.append(ev)
self.atittude_error_over_time.append(e_R)
self.attitude_rate_error_over_time.append(e_w)
@staticmethod
def vee(S):
"""Auxiliary function that computes the 'v' map which takes elements from so(3) to R^3.
Args:
S (np.array): A matrix in so(3)
"""
return np.array([-S[1,2], S[0,2], -S[0,1]])
def reset_statistics(self):
self.index = 0
# If we received an external trajectory, reset the time to 0.0
if self.trajectory is not None:
self.total_time = 0.0
# if using the internal trajectory, make the parametric value start at -5.0
else:
self.total_time = -5.0
# Reset the lists used for analysing performance statistics
self.time_vector = []
self.desired_position_over_time = []
self.position_over_time = []
self.position_error_over_time = []
self.velocity_error_over_time = []
self.atittude_error_over_time = []
self.attitude_rate_error_over_time = []
# ---------------------------------------------------
# Definition of an exponential trajectory for example
    # This can be used as a reference if no trajectory file is passed
# as an argument to the constructor of this class
# ---------------------------------------------------
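    # In closed form, the built-in reference is p(t) = [t, (1/s) * exp(-t^2 / (2 s^2)), (1/s) * exp(-t^2 / (2 s^2)) + 1],
    # i.e. a Gaussian-shaped bump in y and z while moving at constant speed along x (with y mirrored and offset when
    # reverse=True), and d_pd/dd_pd/ddd_pd below are simply its analytic time-derivatives.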
def pd(self, t, s, reverse=False):
"""The desired position of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the x, y ,z desired [m]
"""
x = t
z = 1 / s * np.exp(-0.5 * np.power(t/s, 2)) + 1.0
y = 1 / s * np.exp(-0.5 * np.power(t/s, 2))
if reverse == True:
y = -1 / s * np.exp(-0.5 * np.power(t/s, 2)) + 4.5
return np.array([x,y,z])
def d_pd(self, t, s, reverse=False):
"""The desired velocity of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the d_x, d_y ,d_z desired [m/s]
"""
x = 1.0
y = -(t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
z = -(t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
if reverse == True:
y = (t * np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,3)
return np.array([x,y,z])
def dd_pd(self, t, s, reverse=False):
"""The desired acceleration of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the dd_x, dd_y ,dd_z desired [m/s^2]
"""
x = 0.0
y = (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3)
z = (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3)
if reverse == True:
y = np.exp(-np.power(t,2)/(2*np.power(s,2)))/np.power(s,3) - (np.power(t,2)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5)
return np.array([x,y,z])
def ddd_pd(self, t, s, reverse=False):
"""The desired jerk of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
reverse (bool, optional): Choose whether we want to flip the curve (so that we can have 2 drones almost touching). Defaults to False.
Returns:
np.ndarray: A 3x1 array with the ddd_x, ddd_y ,ddd_z desired [m/s^3]
"""
x = 0.0
y = (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7)
z = (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5) - (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7)
if reverse == True:
y = (np.power(t,3)*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,7) - (3*t*np.exp(-np.power(t,2)/(2*np.power(s,2))))/np.power(s,5)
return np.array([x,y,z])
def yaw_d(self, t, s):
"""The desired yaw of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
        Returns:
            float: The desired yaw in rad
"""
return 0.0
def d_yaw_d(self, t, s):
"""The desired yaw_rate of the built-in trajectory
Args:
t (float): The parametric value that guides the equation
            s (float): How steep and aggressive the curve is
        Returns:
            float: The desired yaw_rate in rad/s
"""
return 0.0 | 18,171 | Python | 41.064815 | 149 | 0.587144 |
superboySB/SBDrone_deprecated/src/HITL/drone_env.py | # import setup_path
import airsim
import numpy as np
import math
import time
from argparse import ArgumentParser
import gym
from gym import spaces
class AirSimDroneEnv(gym.Env):
def __init__(self, ip_address, step_length, image_shape):
        super().__init__()
self.step_length = step_length
self.image_shape = image_shape
self.state = {
"position": np.zeros(3),
"collision": False,
"prev_position": np.zeros(3),
}
self.drone = airsim.MultirotorClient(ip=ip_address)
self.action_space = spaces.Discrete(7)
self.observation_space = spaces.Box(0, 255, shape=image_shape, dtype=np.uint8)
self._setup_flight()
self.image_request = airsim.ImageRequest(
3, airsim.ImageType.DepthPerspective, True, False
)
def __del__(self):
self.drone.reset()
def _setup_flight(self):
self.drone.reset()
self.drone.enableApiControl(True)
self.drone.armDisarm(True)
# Set home position and velocity
self.drone.moveToPositionAsync(-0.55265, -31.9786, -19.0225, 10).join()
self.drone.moveByVelocityAsync(1, -0.67, -0.8, 5).join()
def transform_obs(self, responses):
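        # Convert the floating point depth image into an inverse-depth 8-bit style image and resize it to a single
        # channel 84x84 array, so that it matches the observation space declared in __init__.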
img1d = np.array(responses[0].image_data_float, dtype=float)
img1d = 255 / np.maximum(np.ones(img1d.size), img1d)
img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
from PIL import Image
image = Image.fromarray(img2d)
im_final = np.array(image.resize((84, 84)).convert("L"))
return im_final.reshape([84, 84, 1])
def _get_obs(self):
responses = self.drone.simGetImages([self.image_request])
image = self.transform_obs(responses)
self.drone_state = self.drone.getMultirotorState()
self.state["prev_position"] = self.state["position"]
self.state["position"] = self.drone_state.kinematics_estimated.position
self.state["velocity"] = self.drone_state.kinematics_estimated.linear_velocity
collision = self.drone.simGetCollisionInfo().has_collided
self.state["collision"] = collision
return image
def _do_action(self, action):
quad_offset = self.interpret_action(action)
quad_vel = self.drone.getMultirotorState().kinematics_estimated.linear_velocity
self.drone.moveByVelocityAsync(
quad_vel.x_val + quad_offset[0],
quad_vel.y_val + quad_offset[1],
quad_vel.z_val + quad_offset[2],
5,
).join()
def _compute_reward(self):
thresh_dist = 7
beta = 1
z = -10
pts = [
np.array([-0.55265, -31.9786, -19.0225]),
np.array([48.59735, -63.3286, -60.07256]),
np.array([193.5974, -55.0786, -46.32256]),
np.array([369.2474, 35.32137, -62.5725]),
np.array([541.3474, 143.6714, -32.07256]),
]
quad_pt = np.array(
list(
(
self.state["position"].x_val,
self.state["position"].y_val,
self.state["position"].z_val,
)
)
)
if self.state["collision"]:
reward = -100
else:
dist = 10000000
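            # The distance used below is the perpendicular distance from the drone to the line through each pair of
            # consecutive waypoints, computed as |(p - a) x (p - b)| / |a - b|, keeping the minimum over all pairs.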
for i in range(0, len(pts) - 1):
dist = min(
dist,
np.linalg.norm(np.cross((quad_pt - pts[i]), (quad_pt - pts[i + 1])))
/ np.linalg.norm(pts[i] - pts[i + 1]),
)
if dist > thresh_dist:
reward = -10
else:
reward_dist = math.exp(-beta * dist) - 0.5
reward_speed = (
np.linalg.norm(
[
self.state["velocity"].x_val,
self.state["velocity"].y_val,
self.state["velocity"].z_val,
]
)
- 0.5
)
reward = reward_dist + reward_speed
done = 0
if reward <= -10:
done = 1
return reward, done
def step(self, action):
self._do_action(action)
obs = self._get_obs()
reward, done = self._compute_reward()
return obs, reward, done, self.state
def reset(self):
self._setup_flight()
return self._get_obs()
def interpret_action(self, action):
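        # Map the discrete action (0-6) to a velocity offset: +/- step_length along x, y or z, or a zero offset
        # (keep the current velocity).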
if action == 0:
quad_offset = (self.step_length, 0, 0)
elif action == 1:
quad_offset = (0, self.step_length, 0)
elif action == 2:
quad_offset = (0, 0, self.step_length)
elif action == 3:
quad_offset = (-self.step_length, 0, 0)
elif action == 4:
quad_offset = (0, -self.step_length, 0)
elif action == 5:
quad_offset = (0, 0, -self.step_length)
else:
quad_offset = (0, 0, 0)
return quad_offset
| 5,105 | Python | 29.945454 | 88 | 0.518511 |
superboySB/SBDrone_deprecated/src/HITL/run_ppo.py | # import setup_path
import gym
import airgym
import time
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecTransposeImage
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import EvalCallback
from drone_env import AirSimDroneEnv
# Create a DummyVecEnv for main airsim gym env
env = AirSimDroneEnv(ip_address="172.16.13.104",
step_length=0.25,
image_shape=(84, 84, 1),)
env = DummyVecEnv(env)
# DummyVecEnv(
# [
# lambda: Monitor(
# gym.make(
# "airsim-drone-sample-v0",
# ip_address="172.16.13.104",
# step_length=0.25,
# image_shape=(84, 84, 1),
# )
# )
# ]
# )
# Wrap env as VecTransposeImage to allow SB to handle frame observations
env = VecTransposeImage(env)
# Initialize RL algorithm type and parameters
model = DQN(
"CnnPolicy",
env,
learning_rate=0.00025,
verbose=1,
batch_size=32,
train_freq=4,
target_update_interval=10000,
learning_starts=10000,
buffer_size=500000,
max_grad_norm=10,
exploration_fraction=0.1,
exploration_final_eps=0.01,
device="cuda",
tensorboard_log="./tb_logs/",
)
# Create an evaluation callback with the same env, called every 10000 iterations
callbacks = []
eval_callback = EvalCallback(
env,
callback_on_new_best=None,
n_eval_episodes=5,
best_model_save_path=".",
log_path=".",
eval_freq=10000,
)
callbacks.append(eval_callback)
kwargs = {}
kwargs["callback"] = callbacks
# Train for a certain number of timesteps
model.learn(
total_timesteps=5e5,
tb_log_name="dqn_airsim_drone_run_" + str(time.time()),
**kwargs
)
# Save policy weights
model.save("dqn_airsim_drone_policy")
| 1,908 | Python | 23.792207 | 80 | 0.658281 |
superboySB/SBDrone_deprecated/src/HITL/airsim/pfm.py | import numpy as np
import matplotlib.pyplot as plt
import re
import sys
import pdb
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
pattern = r'^(\d+)\s(\d+)\s$'
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(pattern, temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
temp_str += str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(pattern, temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header: width, height cannot be found')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
# DEY: I don't know why this was there.
file.close()
return data, scale
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write(bytes('PF\n', 'UTF-8') if color else bytes('Pf\n', 'UTF-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(bytes(temp_str, 'UTF-8'))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
temp_str = '%f\n' % scale
file.write(bytes(temp_str, 'UTF-8'))
image.tofile(file)
| 2,323 | Python | 26.023256 | 92 | 0.575118 |
superboySB/SBDrone_deprecated/src/HITL/airsim/__init__.py | from .client import *
from .utils import *
from .types import *
__version__ = "1.8.1"
| 87 | Python | 13.666664 | 21 | 0.643678 |
superboySB/SBDrone_deprecated/src/HITL/airsim/utils.py | import numpy as np #pip install numpy
import math
import time
import sys
import os
import inspect
import types
import re
import logging
from .types import *
def string_to_uint8_array(bstr):
    return np.frombuffer(bstr, np.uint8)
def string_to_float_array(bstr):
    return np.frombuffer(bstr, np.float32)
def list_to_2d_float_array(flst, width, height):
return np.reshape(np.asarray(flst, np.float32), (height, width))
def get_pfm_array(response):
return list_to_2d_float_array(response.image_data_float, response.width, response.height)
def get_public_fields(obj):
return [attr for attr in dir(obj)
if not (attr.startswith("_")
or inspect.isbuiltin(attr)
or inspect.isfunction(attr)
or inspect.ismethod(attr))]
def to_dict(obj):
return dict([attr, getattr(obj, attr)] for attr in get_public_fields(obj))
def to_str(obj):
return str(to_dict(obj))
def write_file(filename, bstr):
"""
Write binary data to file.
Used for writing compressed PNG images
"""
with open(filename, 'wb') as afile:
afile.write(bstr)
# helper method for converting getOrientation to roll/pitch/yaw
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
def to_eularian_angles(q):
z = q.z_val
y = q.y_val
x = q.x_val
w = q.w_val
ysqr = y * y
# roll (x-axis rotation)
t0 = +2.0 * (w*x + y*z)
t1 = +1.0 - 2.0*(x*x + ysqr)
roll = math.atan2(t0, t1)
# pitch (y-axis rotation)
t2 = +2.0 * (w*y - z*x)
if (t2 > 1.0):
t2 = 1
if (t2 < -1.0):
t2 = -1.0
pitch = math.asin(t2)
# yaw (z-axis rotation)
t3 = +2.0 * (w*z + x*y)
t4 = +1.0 - 2.0 * (ysqr + z*z)
yaw = math.atan2(t3, t4)
return (pitch, roll, yaw)
def to_quaternion(pitch, roll, yaw):
t0 = math.cos(yaw * 0.5)
t1 = math.sin(yaw * 0.5)
t2 = math.cos(roll * 0.5)
t3 = math.sin(roll * 0.5)
t4 = math.cos(pitch * 0.5)
t5 = math.sin(pitch * 0.5)
q = Quaternionr()
q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w
q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x
q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y
q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z
return q
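# Illustrative round trip (values are only an example): to_eularian_angles(to_quaternion(0.1, 0.2, 0.3)) returns
# approximately (0.1, 0.2, 0.3), i.e. the (pitch, roll, yaw) angles in radians are recovered.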
def wait_key(message = ''):
''' Wait for a key press on the console and return it. '''
if message != '':
print (message)
result = None
if os.name == 'nt':
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
# DEY: I don't know why this was there.
file.close()
return data, scale
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # grayscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(temp_str.encode('utf-8'))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
temp_str = '%f\n' % scale
file.write(temp_str.encode('utf-8'))
image.tofile(file)
def write_png(filename, image):
""" image must be numpy array H X W X channels
"""
import cv2 # pip install opencv-python
ret = cv2.imwrite(filename, image)
if not ret:
logging.error(f"Writing PNG file {filename} failed")
| 5,280 | Python | 24.267942 | 93 | 0.565341 |
superboySB/SBDrone_deprecated/src/HITL/airsim/types.py | from __future__ import print_function
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import math
class MsgpackMixin:
def __repr__(self):
from pprint import pformat
return "<" + type(self).__name__ + "> " + pformat(vars(self), indent=4, width=1)
def to_msgpack(self, *args, **kwargs):
return self.__dict__
@classmethod
def from_msgpack(cls, encoded):
obj = cls()
#obj.__dict__ = {k.decode('utf-8'): (from_msgpack(v.__class__, v) if hasattr(v, "__dict__") else v) for k, v in encoded.items()}
obj.__dict__ = { k : (v if not isinstance(v, dict) else getattr(getattr(obj, k).__class__, "from_msgpack")(v)) for k, v in encoded.items()}
#return cls(**msgpack.unpack(encoded))
return obj
class _ImageType(type):
@property
def Scene(cls):
return 0
def DepthPlanar(cls):
return 1
def DepthPerspective(cls):
return 2
def DepthVis(cls):
return 3
def DisparityNormalized(cls):
return 4
def Segmentation(cls):
return 5
def SurfaceNormals(cls):
return 6
def Infrared(cls):
return 7
def OpticalFlow(cls):
return 8
def OpticalFlowVis(cls):
return 9
def __getattr__(self, key):
if key == 'DepthPlanner':
print('\033[31m'+"DepthPlanner has been (correctly) renamed to DepthPlanar. Please use ImageType.DepthPlanar instead."+'\033[0m')
raise AttributeError
class ImageType(metaclass=_ImageType):
Scene = 0
DepthPlanar = 1
DepthPerspective = 2
DepthVis = 3
DisparityNormalized = 4
Segmentation = 5
SurfaceNormals = 6
Infrared = 7
OpticalFlow = 8
OpticalFlowVis = 9
class DrivetrainType:
MaxDegreeOfFreedom = 0
ForwardOnly = 1
class LandedState:
Landed = 0
Flying = 1
class WeatherParameter:
Rain = 0
Roadwetness = 1
Snow = 2
RoadSnow = 3
MapleLeaf = 4
RoadLeaf = 5
Dust = 6
Fog = 7
Enabled = 8
class Vector2r(MsgpackMixin):
x_val = 0.0
y_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0):
self.x_val = x_val
self.y_val = y_val
class Vector3r(MsgpackMixin):
x_val = 0.0
y_val = 0.0
z_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
@staticmethod
def nanVector3r():
return Vector3r(np.nan, np.nan, np.nan)
def containsNan(self):
return (math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))
def __add__(self, other):
return Vector3r(self.x_val + other.x_val, self.y_val + other.y_val, self.z_val + other.z_val)
def __sub__(self, other):
return Vector3r(self.x_val - other.x_val, self.y_val - other.y_val, self.z_val - other.z_val)
def __truediv__(self, other):
if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Vector3r( self.x_val / other, self.y_val / other, self.z_val / other)
else:
raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )
def __mul__(self, other):
if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Vector3r(self.x_val*other, self.y_val*other, self.z_val*other)
else:
raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )
def dot(self, other):
if type(self) == type(other):
return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val
else:
raise TypeError('unsupported operand type(s) for \'dot\': %s and %s' % ( str(type(self)), str(type(other))) )
def cross(self, other):
if type(self) == type(other):
cross_product = np.cross(self.to_numpy_array(), other.to_numpy_array())
return Vector3r(cross_product[0], cross_product[1], cross_product[2])
else:
raise TypeError('unsupported operand type(s) for \'cross\': %s and %s' % ( str(type(self)), str(type(other))) )
def get_length(self):
return ( self.x_val**2 + self.y_val**2 + self.z_val**2 )**0.5
def distance_to(self, other):
return ( (self.x_val-other.x_val)**2 + (self.y_val-other.y_val)**2 + (self.z_val-other.z_val)**2 )**0.5
def to_Quaternionr(self):
return Quaternionr(self.x_val, self.y_val, self.z_val, 0)
def to_numpy_array(self):
return np.array([self.x_val, self.y_val, self.z_val], dtype=np.float32)
def __iter__(self):
return iter((self.x_val, self.y_val, self.z_val))
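# Illustrative sketch (not part of the original AirSim types): exercises the
# Vector3r operators defined above; the helper name is a placeholder and the
# function is never called at import time.
def _example_vector3r_math():
    a = Vector3r(1.0, 2.0, 3.0)
    b = Vector3r(4.0, 5.0, 6.0)
    total = a + b                # element-wise sum -> Vector3r(5.0, 7.0, 9.0)
    halved = a / 2.0             # scalar division
    dot = a.dot(b)               # 1*4 + 2*5 + 3*6 = 32.0
    normal = a.cross(b)          # right-handed cross product -> Vector3r(-3.0, 6.0, -3.0)
    length = a.get_length()      # sqrt(1 + 4 + 9)
    return total, halved, dot, normal, length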
class Quaternionr(MsgpackMixin):
w_val = 0.0
x_val = 0.0
y_val = 0.0
z_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0, w_val = 1.0):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
self.w_val = w_val
@staticmethod
def nanQuaternionr():
return Quaternionr(np.nan, np.nan, np.nan, np.nan)
def containsNan(self):
return (math.isnan(self.w_val) or math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))
def __add__(self, other):
if type(self) == type(other):
return Quaternionr( self.x_val+other.x_val, self.y_val+other.y_val, self.z_val+other.z_val, self.w_val+other.w_val )
else:
raise TypeError('unsupported operand type(s) for +: %s and %s' % ( str(type(self)), str(type(other))) )
def __mul__(self, other):
if type(self) == type(other):
t, x, y, z = self.w_val, self.x_val, self.y_val, self.z_val
a, b, c, d = other.w_val, other.x_val, other.y_val, other.z_val
return Quaternionr( w_val = a*t - b*x - c*y - d*z,
x_val = b*t + a*x + d*y - c*z,
y_val = c*t + a*y + b*z - d*x,
z_val = d*t + z*a + c*x - b*y)
else:
raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )
def __truediv__(self, other):
if type(other) == type(self):
return self * other.inverse()
elif type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Quaternionr( self.x_val / other, self.y_val / other, self.z_val / other, self.w_val / other)
else:
raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )
def dot(self, other):
if type(self) == type(other):
return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val + self.w_val*other.w_val
else:
raise TypeError('unsupported operand type(s) for \'dot\': %s and %s' % ( str(type(self)), str(type(other))) )
def cross(self, other):
if type(self) == type(other):
return (self * other - other * self) / 2
else:
raise TypeError('unsupported operand type(s) for \'cross\': %s and %s' % ( str(type(self)), str(type(other))) )
def outer_product(self, other):
if type(self) == type(other):
return ( self.inverse()*other - other.inverse()*self ) / 2
else:
raise TypeError('unsupported operand type(s) for \'outer_product\': %s and %s' % ( str(type(self)), str(type(other))) )
def rotate(self, other):
if type(self) == type(other):
if other.get_length() == 1:
return other * self * other.inverse()
else:
raise ValueError('length of the other Quaternionr must be 1')
else:
raise TypeError('unsupported operand type(s) for \'rotate\': %s and %s' % ( str(type(self)), str(type(other))) )
def conjugate(self):
return Quaternionr(-self.x_val, -self.y_val, -self.z_val, self.w_val)
def star(self):
return self.conjugate()
def inverse(self):
return self.star() / self.dot(self)
def sgn(self):
return self/self.get_length()
def get_length(self):
return ( self.x_val**2 + self.y_val**2 + self.z_val**2 + self.w_val**2 )**0.5
def to_numpy_array(self):
return np.array([self.x_val, self.y_val, self.z_val, self.w_val], dtype=np.float32)
def __iter__(self):
return iter((self.x_val, self.y_val, self.z_val, self.w_val))
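# Illustrative sketch (not part of the original AirSim types): composes two
# rotations with the Hamilton product implemented by Quaternionr.__mul__ above
# and applies the result to a vector. rotate() insists on an exactly unit-length
# argument, so 180-degree rotations (whose components are exact floats) are used
# here. The helper name is a placeholder.
def _example_quaternion_rotation():
    yaw_180 = Quaternionr(0.0, 0.0, 1.0, 0.0)    # 180 deg about Z
    roll_180 = Quaternionr(1.0, 0.0, 0.0, 0.0)   # 180 deg about X
    combined = yaw_180 * roll_180                # equals a 180 deg rotation about Y
    v = Vector3r(1.0, 0.0, 0.0).to_Quaternionr() # vector as a pure quaternion
    rotated = v.rotate(combined)                 # q * v * q^-1 -> maps (1,0,0) to (-1,0,0)
    return Vector3r(rotated.x_val, rotated.y_val, rotated.z_val)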
class Pose(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
def __init__(self, position_val = None, orientation_val = None):
position_val = position_val if position_val is not None else Vector3r()
orientation_val = orientation_val if orientation_val is not None else Quaternionr()
self.position = position_val
self.orientation = orientation_val
@staticmethod
def nanPose():
return Pose(Vector3r.nanVector3r(), Quaternionr.nanQuaternionr())
def containsNan(self):
return (self.position.containsNan() or self.orientation.containsNan())
def __iter__(self):
return iter((self.position, self.orientation))
class CollisionInfo(MsgpackMixin):
has_collided = False
normal = Vector3r()
impact_point = Vector3r()
position = Vector3r()
penetration_depth = 0.0
time_stamp = 0.0
object_name = ""
object_id = -1
class GeoPoint(MsgpackMixin):
latitude = 0.0
longitude = 0.0
altitude = 0.0
class YawMode(MsgpackMixin):
is_rate = True
yaw_or_rate = 0.0
def __init__(self, is_rate = True, yaw_or_rate = 0.0):
self.is_rate = is_rate
self.yaw_or_rate = yaw_or_rate
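# Illustrative sketch (not part of the original AirSim types): the two ways a
# YawMode is typically filled in for the moveBy*Async calls in client.py --
# is_rate=False treats yaw_or_rate as an absolute yaw angle in degrees, while
# is_rate=True treats it as a yaw rate in deg/s. The helper name is a placeholder.
def _example_yaw_modes():
    hold_heading = YawMode(is_rate=False, yaw_or_rate=90.0)   # face 90 degrees
    spin_slowly = YawMode(is_rate=True, yaw_or_rate=10.0)     # rotate at 10 deg/s
    return hold_heading, spin_slowly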
class RCData(MsgpackMixin):
timestamp = 0
    pitch, roll, throttle, yaw = (0.0,)*4  # initialize 4 variables to 0.0
switch1, switch2, switch3, switch4 = (0,)*4
switch5, switch6, switch7, switch8 = (0,)*4
is_initialized = False
is_valid = False
def __init__(self, timestamp = 0, pitch = 0.0, roll = 0.0, throttle = 0.0, yaw = 0.0, switch1 = 0,
switch2 = 0, switch3 = 0, switch4 = 0, switch5 = 0, switch6 = 0, switch7 = 0, switch8 = 0, is_initialized = False, is_valid = False):
self.timestamp = timestamp
self.pitch = pitch
self.roll = roll
self.throttle = throttle
self.yaw = yaw
self.switch1 = switch1
self.switch2 = switch2
self.switch3 = switch3
self.switch4 = switch4
self.switch5 = switch5
self.switch6 = switch6
self.switch7 = switch7
self.switch8 = switch8
self.is_initialized = is_initialized
self.is_valid = is_valid
class ImageRequest(MsgpackMixin):
camera_name = '0'
image_type = ImageType.Scene
pixels_as_float = False
compress = False
def __init__(self, camera_name, image_type, pixels_as_float = False, compress = True):
# todo: in future remove str(), it's only for compatibility to pre v1.2
self.camera_name = str(camera_name)
self.image_type = image_type
self.pixels_as_float = pixels_as_float
self.compress = compress
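# Illustrative sketch (not part of the original AirSim types): two typical
# ImageRequest configurations for use with VehicleClient.simGetImages in
# client.py -- a compressed RGB image and an uncompressed floating-point depth
# image. The camera name "0" and the helper name are placeholders.
def _example_image_requests():
    rgb = ImageRequest("0", ImageType.Scene, pixels_as_float=False, compress=True)
    depth = ImageRequest("0", ImageType.DepthPlanar, pixels_as_float=True, compress=False)
    return [rgb, depth]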
class ImageResponse(MsgpackMixin):
image_data_uint8 = np.uint8(0)
image_data_float = 0.0
camera_position = Vector3r()
camera_orientation = Quaternionr()
time_stamp = np.uint64(0)
message = ''
pixels_as_float = 0.0
compress = True
width = 0
height = 0
image_type = ImageType.Scene
class CarControls(MsgpackMixin):
throttle = 0.0
steering = 0.0
brake = 0.0
handbrake = False
is_manual_gear = False
manual_gear = 0
gear_immediate = True
def __init__(self, throttle = 0, steering = 0, brake = 0,
handbrake = False, is_manual_gear = False, manual_gear = 0, gear_immediate = True):
self.throttle = throttle
self.steering = steering
self.brake = brake
self.handbrake = handbrake
self.is_manual_gear = is_manual_gear
self.manual_gear = manual_gear
self.gear_immediate = gear_immediate
def set_throttle(self, throttle_val, forward):
if (forward):
self.is_manual_gear = False
self.manual_gear = 0
self.throttle = abs(throttle_val)
else:
self.is_manual_gear = False
self.manual_gear = -1
self.throttle = - abs(throttle_val)
class KinematicsState(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
linear_velocity = Vector3r()
angular_velocity = Vector3r()
linear_acceleration = Vector3r()
angular_acceleration = Vector3r()
class EnvironmentState(MsgpackMixin):
position = Vector3r()
geo_point = GeoPoint()
gravity = Vector3r()
air_pressure = 0.0
temperature = 0.0
air_density = 0.0
class CarState(MsgpackMixin):
speed = 0.0
gear = 0
rpm = 0.0
maxrpm = 0.0
handbrake = False
collision = CollisionInfo()
kinematics_estimated = KinematicsState()
timestamp = np.uint64(0)
class MultirotorState(MsgpackMixin):
collision = CollisionInfo()
kinematics_estimated = KinematicsState()
gps_location = GeoPoint()
timestamp = np.uint64(0)
landed_state = LandedState.Landed
rc_data = RCData()
ready = False
ready_message = ""
can_arm = False
class RotorStates(MsgpackMixin):
timestamp = np.uint64(0)
rotors = []
class ProjectionMatrix(MsgpackMixin):
matrix = []
class CameraInfo(MsgpackMixin):
pose = Pose()
fov = -1
proj_mat = ProjectionMatrix()
class LidarData(MsgpackMixin):
point_cloud = 0.0
time_stamp = np.uint64(0)
pose = Pose()
segmentation = 0
class ImuData(MsgpackMixin):
time_stamp = np.uint64(0)
orientation = Quaternionr()
angular_velocity = Vector3r()
linear_acceleration = Vector3r()
class BarometerData(MsgpackMixin):
time_stamp = np.uint64(0)
    altitude = 0.0
    pressure = 0.0
    qnh = 0.0
class MagnetometerData(MsgpackMixin):
time_stamp = np.uint64(0)
magnetic_field_body = Vector3r()
magnetic_field_covariance = 0.0
class GnssFixType(MsgpackMixin):
GNSS_FIX_NO_FIX = 0
GNSS_FIX_TIME_ONLY = 1
GNSS_FIX_2D_FIX = 2
GNSS_FIX_3D_FIX = 3
class GnssReport(MsgpackMixin):
geo_point = GeoPoint()
eph = 0.0
epv = 0.0
velocity = Vector3r()
fix_type = GnssFixType()
time_utc = np.uint64(0)
class GpsData(MsgpackMixin):
time_stamp = np.uint64(0)
gnss = GnssReport()
is_valid = False
class DistanceSensorData(MsgpackMixin):
time_stamp = np.uint64(0)
distance = 0.0
min_distance = 0.0
max_distance = 0.0
relative_pose = Pose()
class Box2D(MsgpackMixin):
min = Vector2r()
max = Vector2r()
class Box3D(MsgpackMixin):
min = Vector3r()
max = Vector3r()
class DetectionInfo(MsgpackMixin):
name = ''
geo_point = GeoPoint()
box2D = Box2D()
box3D = Box3D()
relative_pose = Pose()
class PIDGains():
"""
Struct to store values of PID gains. Used to transmit controller gain values while instantiating
AngleLevel/AngleRate/Velocity/PositionControllerGains objects.
Attributes:
kP (float): Proportional gain
kI (float): Integrator gain
kD (float): Derivative gain
"""
def __init__(self, kp, ki, kd):
self.kp = kp
self.ki = ki
self.kd = kd
def to_list(self):
return [self.kp, self.ki, self.kd]
class AngleRateControllerGains():
"""
    Struct to contain controller gains used by angle rate PID controller
Attributes:
roll_gains (PIDGains): kP, kI, kD for roll axis
pitch_gains (PIDGains): kP, kI, kD for pitch axis
yaw_gains (PIDGains): kP, kI, kD for yaw axis
"""
def __init__(self, roll_gains = PIDGains(0.25, 0, 0),
pitch_gains = PIDGains(0.25, 0, 0),
yaw_gains = PIDGains(0.25, 0, 0)):
self.roll_gains = roll_gains
self.pitch_gains = pitch_gains
self.yaw_gains = yaw_gains
def to_lists(self):
return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]
class AngleLevelControllerGains():
"""
    Struct to contain controller gains used by angle level PID controller
Attributes:
roll_gains (PIDGains): kP, kI, kD for roll axis
pitch_gains (PIDGains): kP, kI, kD for pitch axis
yaw_gains (PIDGains): kP, kI, kD for yaw axis
"""
def __init__(self, roll_gains = PIDGains(2.5, 0, 0),
pitch_gains = PIDGains(2.5, 0, 0),
yaw_gains = PIDGains(2.5, 0, 0)):
self.roll_gains = roll_gains
self.pitch_gains = pitch_gains
self.yaw_gains = yaw_gains
def to_lists(self):
return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]
class VelocityControllerGains():
"""
Struct to contain controller gains used by velocity PID controller
Attributes:
x_gains (PIDGains): kP, kI, kD for X axis
y_gains (PIDGains): kP, kI, kD for Y axis
z_gains (PIDGains): kP, kI, kD for Z axis
"""
def __init__(self, x_gains = PIDGains(0.2, 0, 0),
y_gains = PIDGains(0.2, 0, 0),
z_gains = PIDGains(2.0, 2.0, 0)):
self.x_gains = x_gains
self.y_gains = y_gains
self.z_gains = z_gains
def to_lists(self):
return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]
class PositionControllerGains():
"""
Struct to contain controller gains used by position PID controller
Attributes:
x_gains (PIDGains): kP, kI, kD for X axis
y_gains (PIDGains): kP, kI, kD for Y axis
z_gains (PIDGains): kP, kI, kD for Z axis
"""
def __init__(self, x_gains = PIDGains(0.25, 0, 0),
y_gains = PIDGains(0.25, 0, 0),
z_gains = PIDGains(0.25, 0, 0)):
self.x_gains = x_gains
self.y_gains = y_gains
self.z_gains = z_gains
def to_lists(self):
return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]
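# Illustrative sketch (not part of the original AirSim types): builds a
# VelocityControllerGains object out of PIDGains and unpacks it into the three
# kP/kI/kD lists that to_lists() above produces for the RPC layer. The gain
# values and the helper name are placeholders, not recommended tunings.
def _example_controller_gains():
    gains = VelocityControllerGains(x_gains=PIDGains(0.3, 0.0, 0.05),
                                    y_gains=PIDGains(0.3, 0.0, 0.05),
                                    z_gains=PIDGains(2.0, 2.0, 0.0))
    kp_list, ki_list, kd_list = gains.to_lists()
    return kp_list, ki_list, kd_list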
class MeshPositionVertexBuffersResponse(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
vertices = 0.0
indices = 0.0
name = ''
| 18,961 | Python | 31.636833 | 199 | 0.594009 |
superboySB/SBDrone_deprecated/src/HITL/airsim/client.py | from __future__ import print_function
from .utils import *
from .types import *
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
import time
import math
import logging
class VehicleClient:
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
if (ip == ""):
ip = "127.0.0.1"
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = timeout_value, pack_encoding = 'utf-8', unpack_encoding = 'utf-8')
#----------------------------------- Common vehicle APIs ---------------------------------------------
def reset(self):
"""
Reset the vehicle to its original starting state
Note that you must call `enableApiControl` and `armDisarm` again after the call to reset
"""
self.client.call('reset')
def ping(self):
"""
If connection is established then this call will return true otherwise it will be blocked until timeout
Returns:
bool:
"""
return self.client.call('ping')
def getClientVersion(self):
return 1 # sync with C++ client
def getServerVersion(self):
return self.client.call('getServerVersion')
def getMinRequiredServerVersion(self):
return 1 # sync with C++ client
def getMinRequiredClientVersion(self):
return self.client.call('getMinRequiredClientVersion')
#basic flight control
def enableApiControl(self, is_enabled, vehicle_name = ''):
"""
Enables or disables API control for vehicle corresponding to vehicle_name
Args:
is_enabled (bool): True to enable, False to disable API control
vehicle_name (str, optional): Name of the vehicle to send this command to
"""
self.client.call('enableApiControl', is_enabled, vehicle_name)
def isApiControlEnabled(self, vehicle_name = ''):
"""
Returns true if API control is established.
        If false (which is the default) then API calls are ignored. After a successful call to `enableApiControl`, `isApiControlEnabled` should return true.
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
bool: If API control is enabled
"""
return self.client.call('isApiControlEnabled', vehicle_name)
def armDisarm(self, arm, vehicle_name = ''):
"""
Arms or disarms vehicle
Args:
arm (bool): True to arm, False to disarm the vehicle
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
bool: Success
"""
return self.client.call('armDisarm', arm, vehicle_name)
def simPause(self, is_paused):
"""
Pauses simulation
Args:
is_paused (bool): True to pause the simulation, False to release
"""
self.client.call('simPause', is_paused)
def simIsPause(self):
"""
Returns true if the simulation is paused
Returns:
bool: If the simulation is paused
"""
return self.client.call("simIsPaused")
def simContinueForTime(self, seconds):
"""
Continue the simulation for the specified number of seconds
Args:
seconds (float): Time to run the simulation for
"""
self.client.call('simContinueForTime', seconds)
def simContinueForFrames(self, frames):
"""
Continue (or resume if paused) the simulation for the specified number of frames, after which the simulation will be paused.
Args:
frames (int): Frames to run the simulation for
"""
self.client.call('simContinueForFrames', frames)
def getHomeGeoPoint(self, vehicle_name = ''):
"""
Get the Home location of the vehicle
Args:
vehicle_name (str, optional): Name of vehicle to get home location of
Returns:
GeoPoint: Home location of the vehicle
"""
return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint', vehicle_name))
def confirmConnection(self):
"""
        Checks the state of the connection every 1 sec and reports it in the console so the user can see the connection progress.
"""
if self.ping():
print("Connected!")
else:
print("Ping returned false!")
server_ver = self.getServerVersion()
client_ver = self.getClientVersion()
server_min_ver = self.getMinRequiredServerVersion()
client_min_ver = self.getMinRequiredClientVersion()
ver_info = "Client Ver:" + str(client_ver) + " (Min Req: " + str(client_min_ver) + \
"), Server Ver:" + str(server_ver) + " (Min Req: " + str(server_min_ver) + ")"
if server_ver < server_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim server is of older version and not supported by this client. Please upgrade!")
elif client_ver < client_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim client is of older version and not supported by this server. Please upgrade!")
else:
print(ver_info)
print('')
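    # Illustrative helper (not part of the original AirSim API): the setup
    # sequence that the reset() docstring above refers to -- connect, enable
    # API control and arm. Assumes a simulator is listening on the ip/port
    # this client was constructed with.
    def _example_connect_and_arm(self, vehicle_name = ''):
        self.confirmConnection()
        self.enableApiControl(True, vehicle_name)
        return self.armDisarm(True, vehicle_name)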
def simSetLightIntensity(self, light_name, intensity):
"""
Change intensity of named light
Args:
light_name (str): Name of light to change
intensity (float): New intensity value
Returns:
bool: True if successful, otherwise False
"""
return self.client.call("simSetLightIntensity", light_name, intensity)
def simSwapTextures(self, tags, tex_id = 0, component_id = 0, material_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
tags (str): string of "," or ", " delimited tags to identify on which actors to perform the swap
tex_id (int, optional): indexes the array of textures assigned to each actor undergoing a swap
If out-of-bounds for some object's texture set, it will be taken modulo the number of textures that were available
component_id (int, optional):
material_id (int, optional):
Returns:
            list[str]: List of objects which matched the provided tags and had the texture swap performed
"""
return self.client.call("simSwapTextures", tags, tex_id, component_id, material_id)
def simSetObjectMaterial(self, object_name, material_name, component_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
object_name (str): name of object to set material for
material_name (str): name of material to set for object
component_id (int, optional) : index of material elements
Returns:
bool: True if material was set
"""
return self.client.call("simSetObjectMaterial", object_name, material_name, component_id)
def simSetObjectMaterialFromTexture(self, object_name, texture_path, component_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
object_name (str): name of object to set material for
texture_path (str): path to texture to set for object
component_id (int, optional) : index of material elements
Returns:
bool: True if material was set
"""
return self.client.call("simSetObjectMaterialFromTexture", object_name, texture_path, component_id)
    # time-of-day control
def simSetTimeOfDay(self, is_enabled, start_datetime = "", is_start_datetime_dst = False, celestial_clock_speed = 1, update_interval_secs = 60, move_sun = True):
"""
Control the position of Sun in the environment
Sun's position is computed using the coordinates specified in `OriginGeopoint` in settings for the date-time specified in the argument,
else if the string is empty, current date & time is used
Args:
is_enabled (bool): True to enable time-of-day effect, False to reset the position to original
start_datetime (str, optional): Date & Time in %Y-%m-%d %H:%M:%S format, e.g. `2018-02-12 15:20:00`
is_start_datetime_dst (bool, optional): True to adjust for Daylight Savings Time
celestial_clock_speed (float, optional): Run celestial clock faster or slower than simulation clock
E.g. Value 100 means for every 1 second of simulation clock, Sun's position is advanced by 100 seconds
so Sun will move in sky much faster
update_interval_secs (float, optional): Interval to update the Sun's position
move_sun (bool, optional): Whether or not to move the Sun
"""
self.client.call('simSetTimeOfDay', is_enabled, start_datetime, is_start_datetime_dst, celestial_clock_speed, update_interval_secs, move_sun)
#weather
def simEnableWeather(self, enable):
"""
Enable Weather effects. Needs to be called before using `simSetWeatherParameter` API
Args:
enable (bool): True to enable, False to disable
"""
self.client.call('simEnableWeather', enable)
def simSetWeatherParameter(self, param, val):
"""
Enable various weather effects
Args:
param (WeatherParameter): Weather effect to be enabled
val (float): Intensity of the effect, Range 0-1
"""
self.client.call('simSetWeatherParameter', param, val)
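    # Illustrative helper (not part of the original AirSim API): weather
    # effects must be switched on with simEnableWeather before individual
    # parameters take effect, as noted above. Intensities range 0-1 and the
    # values below are placeholders.
    def _example_make_it_rain(self):
        self.simEnableWeather(True)
        self.simSetWeatherParameter(WeatherParameter.Rain, 0.75)
        self.simSetWeatherParameter(WeatherParameter.Roadwetness, 0.75)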
#camera control
#simGetImage returns compressed png in array of bytes
#image_type uses one of the ImageType members
def simGetImage(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Get a single image
        Returns bytes of png format image which can be dumped into a binary file to create a .png image
        `string_to_uint8_array()` can be used to convert into a numpy uint8 array
See https://microsoft.github.io/AirSim/image_apis/ for details
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Name of the vehicle with the camera
external (bool, optional): Whether the camera is an External Camera
Returns:
Binary string literal of compressed png image
"""
#todo : in future remove below, it's only for compatibility to pre v1.2
camera_name = str(camera_name)
#because this method returns std::vector < uint8>, msgpack decides to encode it as a string unfortunately.
result = self.client.call('simGetImage', camera_name, image_type, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
#camera control
#simGetImage returns compressed png in array of bytes
#image_type uses one of the ImageType members
def simGetImages(self, requests, vehicle_name = '', external = False):
"""
Get multiple images
See https://microsoft.github.io/AirSim/image_apis/ for details and examples
Args:
requests (list[ImageRequest]): Images required
vehicle_name (str, optional): Name of vehicle associated with the camera
external (bool, optional): Whether the camera is an External Camera
Returns:
list[ImageResponse]:
"""
responses_raw = self.client.call('simGetImages', requests, vehicle_name, external)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
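    # Illustrative helper (not part of the original AirSim API): requests one
    # compressed RGB image and one floating-point depth image in a single
    # simGetImages round trip. Decoding image_data_uint8 / image_data_float is
    # left to the caller (see https://microsoft.github.io/AirSim/image_apis/).
    # The camera name "0" is a placeholder.
    def _example_fetch_images(self, vehicle_name = ''):
        requests = [ImageRequest("0", ImageType.Scene, False, True),
                    ImageRequest("0", ImageType.DepthPlanar, True, False)]
        return self.simGetImages(requests, vehicle_name)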
#CinemAirSim
def simGetPresetLensSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetPresetLensSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simGetLensSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetLensSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetPresetLensSettings(self, preset_lens_settings, camera_name, vehicle_name = '', external = False):
self.client.call("simSetPresetLensSettings", preset_lens_settings, camera_name, vehicle_name, external)
def simGetPresetFilmbackSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetPresetFilmbackSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetPresetFilmbackSettings(self, preset_filmback_settings, camera_name, vehicle_name = '', external = False):
self.client.call("simSetPresetFilmbackSettings", preset_filmback_settings, camera_name, vehicle_name, external)
def simGetFilmbackSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetFilmbackSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetFilmbackSettings(self, sensor_width, sensor_height, camera_name, vehicle_name = '', external = False):
return self.client.call("simSetFilmbackSettings", sensor_width, sensor_height, camera_name, vehicle_name, external)
def simGetFocalLength(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocalLength", camera_name, vehicle_name, external)
def simSetFocalLength(self, focal_length, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocalLength", focal_length, camera_name, vehicle_name, external)
def simEnableManualFocus(self, enable, camera_name, vehicle_name = '', external = False):
self.client.call("simEnableManualFocus", enable, camera_name, vehicle_name, external)
def simGetFocusDistance(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocusDistance", camera_name, vehicle_name, external)
def simSetFocusDistance(self, focus_distance, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocusDistance", focus_distance, camera_name, vehicle_name, external)
def simGetFocusAperture(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocusAperture", camera_name, vehicle_name, external)
def simSetFocusAperture(self, focus_aperture, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocusAperture", focus_aperture, camera_name, vehicle_name, external)
def simEnableFocusPlane(self, enable, camera_name, vehicle_name = '', external = False):
self.client.call("simEnableFocusPlane", enable, camera_name, vehicle_name, external)
def simGetCurrentFieldOfView(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetCurrentFieldOfView", camera_name, vehicle_name, external)
#End CinemAirSim
def simTestLineOfSightToPoint(self, point, vehicle_name = ''):
"""
Returns whether the target point is visible from the perspective of the inputted vehicle
Args:
point (GeoPoint): target point
vehicle_name (str, optional): Name of vehicle
Returns:
[bool]: Success
"""
return self.client.call('simTestLineOfSightToPoint', point, vehicle_name)
def simTestLineOfSightBetweenPoints(self, point1, point2):
"""
Returns whether the target point is visible from the perspective of the source point
Args:
point1 (GeoPoint): source point
point2 (GeoPoint): target point
Returns:
[bool]: Success
"""
return self.client.call('simTestLineOfSightBetweenPoints', point1, point2)
def simGetWorldExtents(self):
"""
Returns a list of GeoPoints representing the minimum and maximum extents of the world
Returns:
list[GeoPoint]
"""
responses_raw = self.client.call('simGetWorldExtents')
return [GeoPoint.from_msgpack(response_raw) for response_raw in responses_raw]
def simRunConsoleCommand(self, command):
"""
Allows the client to execute a command in Unreal's native console, via an API.
Affords access to the countless built-in commands such as "stat unit", "stat fps", "open [map]", adjust any config settings, etc. etc.
Allows the user to create bespoke APIs very easily, by adding a custom event to the level blueprint, and then calling the console command "ce MyEventName [args]". No recompilation of AirSim needed!
Args:
command ([string]): Desired Unreal Engine Console command to run
Returns:
[bool]: Success
"""
return self.client.call('simRunConsoleCommand', command)
#gets the static meshes in the unreal scene
def simGetMeshPositionVertexBuffers(self):
"""
Returns the static meshes that make up the scene
See https://microsoft.github.io/AirSim/meshes/ for details and how to use this
Returns:
list[MeshPositionVertexBuffersResponse]:
"""
responses_raw = self.client.call('simGetMeshPositionVertexBuffers')
return [MeshPositionVertexBuffersResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def simGetCollisionInfo(self, vehicle_name = ''):
"""
Args:
vehicle_name (str, optional): Name of the Vehicle to get the info of
Returns:
CollisionInfo:
"""
return CollisionInfo.from_msgpack(self.client.call('simGetCollisionInfo', vehicle_name))
def simSetVehiclePose(self, pose, ignore_collision, vehicle_name = ''):
"""
Set the pose of the vehicle
If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values
Args:
            pose (Pose): Desired Pose of the vehicle
ignore_collision (bool): Whether to ignore any collision or not
vehicle_name (str, optional): Name of the vehicle to move
"""
self.client.call('simSetVehiclePose', pose, ignore_collision, vehicle_name)
def simGetVehiclePose(self, vehicle_name = ''):
"""
The position inside the returned Pose is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('simGetVehiclePose', vehicle_name)
return Pose.from_msgpack(pose)
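    # Illustrative helper (not part of the original AirSim API): teleports the
    # vehicle 10 m along the X axis of its start frame while leaving its
    # orientation untouched, using the nan convention described in the
    # simSetVehiclePose docstring above.
    def _example_teleport_forward(self, vehicle_name = ''):
        pose = self.simGetVehiclePose(vehicle_name)
        pose.position.x_val += 10.0
        pose.orientation = Quaternionr.nanQuaternionr()   # nan = keep current orientation
        self.simSetVehiclePose(pose, ignore_collision=True, vehicle_name=vehicle_name)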
def simSetTraceLine(self, color_rgba, thickness=1.0, vehicle_name = ''):
"""
Modify the color and thickness of the line when Tracing is enabled
Tracing can be enabled by pressing T in the Editor or setting `EnableTrace` to `True` in the Vehicle Settings
Args:
color_rgba (list): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of the line
vehicle_name (string, optional): Name of the vehicle to set Trace line values for
"""
self.client.call('simSetTraceLine', color_rgba, thickness, vehicle_name)
def simGetObjectPose(self, object_name):
"""
The position inside the returned Pose is in the world frame
Args:
object_name (str): Object to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('simGetObjectPose', object_name)
return Pose.from_msgpack(pose)
def simSetObjectPose(self, object_name, pose, teleport = True):
"""
Set the pose of the object(actor) in the environment
The specified actor must have Mobility set to movable, otherwise there will be undefined behaviour.
See https://www.unrealengine.com/en-US/blog/moving-physical-objects for details on how to set Mobility and the effect of Teleport parameter
Args:
object_name (str): Name of the object(actor) to move
pose (Pose): Desired Pose of the object
teleport (bool, optional): Whether to move the object immediately without affecting their velocity
Returns:
bool: If the move was successful
"""
return self.client.call('simSetObjectPose', object_name, pose, teleport)
def simGetObjectScale(self, object_name):
"""
Gets scale of an object in the world
Args:
object_name (str): Object to get the scale of
Returns:
airsim.Vector3r: Scale
"""
scale = self.client.call('simGetObjectScale', object_name)
return Vector3r.from_msgpack(scale)
def simSetObjectScale(self, object_name, scale_vector):
"""
Sets scale of an object in the world
Args:
object_name (str): Object to set the scale of
scale_vector (airsim.Vector3r): Desired scale of object
Returns:
bool: True if scale change was successful
"""
return self.client.call('simSetObjectScale', object_name, scale_vector)
def simListSceneObjects(self, name_regex = '.*'):
"""
Lists the objects present in the environment
Default behaviour is to list all objects, regex can be used to return smaller list of matching objects or actors
Args:
name_regex (str, optional): String to match actor names against, e.g. "Cylinder.*"
Returns:
list[str]: List containing all the names
"""
return self.client.call('simListSceneObjects', name_regex)
def simLoadLevel(self, level_name):
"""
Loads a level specified by its name
Args:
level_name (str): Name of the level to load
Returns:
bool: True if the level was successfully loaded
"""
return self.client.call('simLoadLevel', level_name)
def simListAssets(self):
"""
Lists all the assets present in the Asset Registry
Returns:
list[str]: Names of all the assets
"""
return self.client.call('simListAssets')
def simSpawnObject(self, object_name, asset_name, pose, scale, physics_enabled=False, is_blueprint=False):
"""Spawned selected object in the world
Args:
object_name (str): Desired name of new object
asset_name (str): Name of asset(mesh) in the project database
pose (airsim.Pose): Desired pose of object
scale (airsim.Vector3r): Desired scale of object
physics_enabled (bool, optional): Whether to enable physics for the object
is_blueprint (bool, optional): Whether to spawn a blueprint or an actor
Returns:
str: Name of spawned object, in case it had to be modified
"""
return self.client.call('simSpawnObject', object_name, asset_name, pose, scale, physics_enabled, is_blueprint)
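    # Illustrative helper (not part of the original AirSim API): spawns a 1 m
    # cube with physics enabled and returns the (possibly adjusted) object
    # name. The object name "MyCube" and asset name "Cube" are placeholders;
    # the asset must exist in the project's asset registry (see simListAssets).
    def _example_spawn_cube(self):
        pose = Pose(Vector3r(5.0, 0.0, -1.0), Quaternionr())
        return self.simSpawnObject("MyCube", "Cube", pose, Vector3r(1.0, 1.0, 1.0),
                                   physics_enabled=True, is_blueprint=False)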
def simDestroyObject(self, object_name):
"""Removes selected object from the world
Args:
object_name (str): Name of object to be removed
Returns:
bool: True if object is queued up for removal
"""
return self.client.call('simDestroyObject', object_name)
def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
"""
Set segmentation ID for specific objects
See https://microsoft.github.io/AirSim/image_apis/#segmentation for details
Args:
mesh_name (str): Name of the mesh to set the ID of (supports regex)
object_id (int): Object ID to be set, range 0-255
                RGB values for IDs can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt
is_name_regex (bool, optional): Whether the mesh name is a regex
Returns:
bool: If the mesh was found
"""
return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)
def simGetSegmentationObjectID(self, mesh_name):
"""
Returns Object ID for the given mesh name
Mapping of Object IDs to RGB values can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt
Args:
mesh_name (str): Name of the mesh to get the ID of
"""
return self.client.call('simGetSegmentationObjectID', mesh_name)
def simAddDetectionFilterMeshName(self, camera_name, image_type, mesh_name, vehicle_name = '', external = False):
"""
Add mesh name to detect in wild card format
For example: simAddDetectionFilterMeshName("Car_*") will detect all instance named "Car_*"
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
mesh_name (str): mesh name in wild card format
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simAddDetectionFilterMeshName', camera_name, image_type, mesh_name, vehicle_name, external)
def simSetDetectionFilterRadius(self, camera_name, image_type, radius_cm, vehicle_name = '', external = False):
"""
Set detection radius for all cameras
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
radius_cm (int): Radius in [cm]
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simSetDetectionFilterRadius', camera_name, image_type, radius_cm, vehicle_name, external)
def simClearDetectionMeshNames(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Clear all mesh names from detection filter
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simClearDetectionMeshNames', camera_name, image_type, vehicle_name, external)
def simGetDetections(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Get current detections
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
DetectionInfo array
"""
responses_raw = self.client.call('simGetDetections', camera_name, image_type, vehicle_name, external)
return [DetectionInfo.from_msgpack(response_raw) for response_raw in responses_raw]
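    # Illustrative helper (not part of the original AirSim API): configures
    # the detection filter and reads back current detections on the Scene
    # image of camera "0", chaining the three detection APIs above. The
    # wildcard "Car_*" and the camera name are placeholders.
    def _example_detect_cars(self, vehicle_name = ''):
        self.simAddDetectionFilterMeshName("0", ImageType.Scene, "Car_*", vehicle_name)
        self.simSetDetectionFilterRadius("0", ImageType.Scene, 200 * 100, vehicle_name)   # 200 m in cm
        return self.simGetDetections("0", ImageType.Scene, vehicle_name)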
def simPrintLogMessage(self, message, message_param = "", severity = 0):
"""
Prints the specified message in the simulator's window.
If message_param is supplied, then it's printed next to the message and in that case if this API is called with same message value
but different message_param again then previous line is overwritten with new line (instead of API creating new line on display).
For example, `simPrintLogMessage("Iteration: ", to_string(i))` keeps updating same line on display when API is called with different values of i.
The valid values of severity parameter is 0 to 3 inclusive that corresponds to different colors.
Args:
message (str): Message to be printed
message_param (str, optional): Parameter to be printed next to the message
severity (int, optional): Range 0-3, inclusive, corresponding to the severity of the message
"""
self.client.call('simPrintLogMessage', message, message_param, severity)
def simGetCameraInfo(self, camera_name, vehicle_name = '', external=False):
"""
Get details about the camera
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
CameraInfo:
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
return CameraInfo.from_msgpack(self.client.call('simGetCameraInfo', str(camera_name), vehicle_name, external))
def simGetDistortionParams(self, camera_name, vehicle_name = '', external = False):
"""
Get camera distortion parameters
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
List (float): List of distortion parameter values corresponding to K1, K2, K3, P1, P2 respectively.
"""
return self.client.call('simGetDistortionParams', str(camera_name), vehicle_name, external)
def simSetDistortionParams(self, camera_name, distortion_params, vehicle_name = '', external = False):
"""
Set camera distortion parameters
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
distortion_params (dict): Dictionary of distortion param names and corresponding values
{"K1": 0.0, "K2": 0.0, "K3": 0.0, "P1": 0.0, "P2": 0.0}
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
for param_name, value in distortion_params.items():
self.simSetDistortionParam(camera_name, param_name, value, vehicle_name, external)
def simSetDistortionParam(self, camera_name, param_name, value, vehicle_name = '', external = False):
"""
Set single camera distortion parameter
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
param_name (str): Name of distortion parameter
value (float): Value of distortion parameter
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simSetDistortionParam', str(camera_name), param_name, value, vehicle_name, external)
def simSetCameraPose(self, camera_name, pose, vehicle_name = '', external = False):
"""
- Control the pose of a selected camera
Args:
camera_name (str): Name of the camera to be controlled
pose (Pose): Pose representing the desired position and orientation of the camera
vehicle_name (str, optional): Name of vehicle which the camera corresponds to
external (bool, optional): Whether the camera is an External Camera
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
self.client.call('simSetCameraPose', str(camera_name), pose, vehicle_name, external)
def simSetCameraFov(self, camera_name, fov_degrees, vehicle_name = '', external = False):
"""
- Control the field of view of a selected camera
Args:
camera_name (str): Name of the camera to be controlled
fov_degrees (float): Value of field of view in degrees
vehicle_name (str, optional): Name of vehicle which the camera corresponds to
external (bool, optional): Whether the camera is an External Camera
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
self.client.call('simSetCameraFov', str(camera_name), fov_degrees, vehicle_name, external)
def simGetGroundTruthKinematics(self, vehicle_name = ''):
"""
Get Ground truth kinematics of the vehicle
The position inside the returned KinematicsState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
KinematicsState: Ground truth of the vehicle
"""
kinematics_state = self.client.call('simGetGroundTruthKinematics', vehicle_name)
return KinematicsState.from_msgpack(kinematics_state)
simGetGroundTruthKinematics.__annotations__ = {'return': KinematicsState}
def simSetKinematics(self, state, ignore_collision, vehicle_name = ''):
"""
Set the kinematics state of the vehicle
If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values
Args:
            state (KinematicsState): Desired kinematics state of the vehicle
ignore_collision (bool): Whether to ignore any collision or not
vehicle_name (str, optional): Name of the vehicle to move
"""
self.client.call('simSetKinematics', state, ignore_collision, vehicle_name)
def simGetGroundTruthEnvironment(self, vehicle_name = ''):
"""
Get ground truth environment state
The position inside the returned EnvironmentState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
EnvironmentState: Ground truth environment state
"""
env_state = self.client.call('simGetGroundTruthEnvironment', vehicle_name)
return EnvironmentState.from_msgpack(env_state)
simGetGroundTruthEnvironment.__annotations__ = {'return': EnvironmentState}
#sensor APIs
def getImuData(self, imu_name = '', vehicle_name = ''):
"""
Args:
imu_name (str, optional): Name of IMU to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
ImuData:
"""
return ImuData.from_msgpack(self.client.call('getImuData', imu_name, vehicle_name))
def getBarometerData(self, barometer_name = '', vehicle_name = ''):
"""
Args:
barometer_name (str, optional): Name of Barometer to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
BarometerData:
"""
return BarometerData.from_msgpack(self.client.call('getBarometerData', barometer_name, vehicle_name))
def getMagnetometerData(self, magnetometer_name = '', vehicle_name = ''):
"""
Args:
magnetometer_name (str, optional): Name of Magnetometer to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
MagnetometerData:
"""
return MagnetometerData.from_msgpack(self.client.call('getMagnetometerData', magnetometer_name, vehicle_name))
def getGpsData(self, gps_name = '', vehicle_name = ''):
"""
Args:
gps_name (str, optional): Name of GPS to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
GpsData:
"""
return GpsData.from_msgpack(self.client.call('getGpsData', gps_name, vehicle_name))
def getDistanceSensorData(self, distance_sensor_name = '', vehicle_name = ''):
"""
Args:
distance_sensor_name (str, optional): Name of Distance Sensor to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
DistanceSensorData:
"""
return DistanceSensorData.from_msgpack(self.client.call('getDistanceSensorData', distance_sensor_name, vehicle_name))
def getLidarData(self, lidar_name = '', vehicle_name = ''):
"""
Args:
lidar_name (str, optional): Name of Lidar to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
LidarData:
"""
return LidarData.from_msgpack(self.client.call('getLidarData', lidar_name, vehicle_name))
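    # Illustrative helper (not part of the original AirSim API): reshapes the
    # flat x, y, z, x, y, z, ... point_cloud list returned by getLidarData
    # above into an (N, 3) numpy array, or returns None when no full point
    # was received.
    def _example_lidar_points(self, lidar_name = '', vehicle_name = ''):
        data = self.getLidarData(lidar_name, vehicle_name)
        if len(data.point_cloud) < 3:
            return None
        return np.array(data.point_cloud, dtype=np.float32).reshape(-1, 3)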
def simGetLidarSegmentation(self, lidar_name = '', vehicle_name = ''):
"""
NOTE: Deprecated API, use `getLidarData()` API instead
Returns Segmentation ID of each point's collided object in the last Lidar update
Args:
lidar_name (str, optional): Name of Lidar sensor
            vehicle_name (str, optional): Name of the vehicle with the sensor
Returns:
list[int]: Segmentation IDs of the objects
"""
logging.warning("simGetLidarSegmentation API is deprecated, use getLidarData() API instead")
return self.getLidarData(lidar_name, vehicle_name).segmentation
#Plotting APIs
def simFlushPersistentMarkers(self):
"""
Clear any persistent markers - those plotted with setting `is_persistent=True` in the APIs below
"""
self.client.call('simFlushPersistentMarkers')
def simPlotPoints(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], size = 10.0, duration = -1.0, is_persistent = False):
"""
Plot a list of 3D points in World NED frame
Args:
points (list[Vector3r]): List of Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
size (float, optional): Size of plotted point
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotPoints', points, color_rgba, size, duration, is_persistent)
def simPlotLineStrip(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a line strip in World NED frame, defined from points[0] to points[1], points[1] to points[2], ... , points[n-2] to points[n-1]
Args:
points (list[Vector3r]): List of 3D locations of line start and end points, specified as Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotLineStrip', points, color_rgba, thickness, duration, is_persistent)
def simPlotLineList(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a line strip in World NED frame, defined from points[0] to points[1], points[2] to points[3], ... , points[n-2] to points[n-1]
Args:
points (list[Vector3r]): List of 3D locations of line start and end points, specified as Vector3r objects. Must be even
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotLineList', points, color_rgba, thickness, duration, is_persistent)
def simPlotArrows(self, points_start, points_end, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, arrow_size = 2.0, duration = -1.0, is_persistent = False):
"""
Plots a list of arrows in World NED frame, defined from points_start[0] to points_end[0], points_start[1] to points_end[1], ... , points_start[n-1] to points_end[n-1]
Args:
points_start (list[Vector3r]): List of 3D start positions of arrow start positions, specified as Vector3r objects
points_end (list[Vector3r]): List of 3D end positions of arrow start positions, specified as Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
arrow_size (float, optional): Size of arrow head
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotArrows', points_start, points_end, color_rgba, thickness, arrow_size, duration, is_persistent)
def simPlotStrings(self, strings, positions, scale = 5, color_rgba=[1.0, 0.0, 0.0, 1.0], duration = -1.0):
"""
Plots a list of strings at desired positions in World NED frame.
Args:
strings (list[String], optional): List of strings to plot
positions (list[Vector3r]): List of positions where the strings should be plotted. Should be in one-to-one correspondence with the strings' list
scale (float, optional): Font scale of transform name
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
duration (float, optional): Duration (seconds) to plot for
"""
self.client.call('simPlotStrings', strings, positions, scale, color_rgba, duration)
def simPlotTransforms(self, poses, scale = 5.0, thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a list of transforms in World NED frame.
Args:
poses (list[Pose]): List of Pose objects representing the transforms to plot
scale (float, optional): Length of transforms' axes
thickness (float, optional): Thickness of transforms' axes
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotTransforms', poses, scale, thickness, duration, is_persistent)
def simPlotTransformsWithNames(self, poses, names, tf_scale = 5.0, tf_thickness = 5.0, text_scale = 10.0, text_color_rgba = [1.0, 0.0, 0.0, 1.0], duration = -1.0):
"""
Plots a list of transforms with their names in World NED frame.
Args:
poses (list[Pose]): List of Pose objects representing the transforms to plot
names (list[string]): List of strings with one-to-one correspondence to list of poses
tf_scale (float, optional): Length of transforms' axes
tf_thickness (float, optional): Thickness of transforms' axes
text_scale (float, optional): Font scale of transform name
text_color_rgba (list, optional): desired RGBA values from 0.0 to 1.0 for the transform name
duration (float, optional): Duration (seconds) to plot for
"""
self.client.call('simPlotTransformsWithNames', poses, names, tf_scale, tf_thickness, text_scale, text_color_rgba, duration)
def cancelLastTask(self, vehicle_name = ''):
"""
Cancel previous Async task
Args:
vehicle_name (str, optional): Name of the vehicle
"""
self.client.call('cancelLastTask', vehicle_name)
#Recording APIs
def startRecording(self):
"""
Start Recording
Recording will be done according to the settings
"""
self.client.call('startRecording')
def stopRecording(self):
"""
Stop Recording
"""
self.client.call('stopRecording')
def isRecording(self):
"""
Whether Recording is running or not
Returns:
bool: True if Recording, else False
"""
return self.client.call('isRecording')
def simSetWind(self, wind):
"""
Set simulated wind, in World frame, NED direction, m/s
Args:
wind (Vector3r): Wind, in World frame, NED direction, in m/s
"""
self.client.call('simSetWind', wind)
def simCreateVoxelGrid(self, position, x, y, z, res, of):
"""
Construct and save a binvox-formatted voxel grid of environment
Args:
position (Vector3r): Position around which voxel grid is centered in m
x, y, z (int): Size of each voxel grid dimension in m
res (float): Resolution of voxel grid in m
of (str): Name of output file to save voxel grid as
Returns:
bool: True if output written to file successfully, else False
"""
return self.client.call('simCreateVoxelGrid', position, x, y, z, res, of)
#Add new vehicle via RPC
def simAddVehicle(self, vehicle_name, vehicle_type, pose, pawn_path = ""):
"""
Create vehicle at runtime
Args:
vehicle_name (str): Name of the vehicle being created
vehicle_type (str): Type of vehicle, e.g. "simpleflight"
pose (Pose): Initial pose of the vehicle
            pawn_path (str, optional): Vehicle blueprint path, default empty which uses the default blueprint for the vehicle type
Returns:
bool: Whether vehicle was created
"""
return self.client.call('simAddVehicle', vehicle_name, vehicle_type, pose, pawn_path)
def listVehicles(self):
"""
Lists the names of current vehicles
Returns:
list[str]: List containing names of all vehicles
"""
return self.client.call('listVehicles')
def getSettingsString(self):
"""
Fetch the settings text being used by AirSim
Returns:
str: Settings text in JSON format
"""
return self.client.call('getSettingsString')
#----------------------------------- Multirotor APIs ---------------------------------------------
class MultirotorClient(VehicleClient, object):
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
super(MultirotorClient, self).__init__(ip, port, timeout_value)
def takeoffAsync(self, timeout_sec = 20, vehicle_name = ''):
"""
Takeoff vehicle to 3m above ground. Vehicle should not be moving when this API is used
Args:
timeout_sec (int, optional): Timeout for the vehicle to reach desired altitude
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('takeoff', timeout_sec, vehicle_name)
def landAsync(self, timeout_sec = 60, vehicle_name = ''):
"""
Land the vehicle
Args:
timeout_sec (int, optional): Timeout for the vehicle to land
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('land', timeout_sec, vehicle_name)
def goHomeAsync(self, timeout_sec = 3e+38, vehicle_name = ''):
"""
Return vehicle to Home i.e. Launch location
Args:
timeout_sec (int, optional): Timeout for the vehicle to reach desired altitude
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('goHome', timeout_sec, vehicle_name)
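    # Illustrative helper (not part of the original AirSim API): the typical
    # blocking takeoff -> move -> land sequence built from the async APIs in
    # this class; .join() waits for each task to finish. The target position
    # (10, 0, -5) in NED and the 3 m/s speed are placeholders.
    def _example_basic_flight(self, vehicle_name = ''):
        self.confirmConnection()
        self.enableApiControl(True, vehicle_name)
        self.armDisarm(True, vehicle_name)
        self.takeoffAsync(vehicle_name=vehicle_name).join()
        self.moveToPositionAsync(10, 0, -5, 3, vehicle_name=vehicle_name).join()
        self.landAsync(vehicle_name=vehicle_name).join()
        self.armDisarm(False, vehicle_name)
        self.enableApiControl(False, vehicle_name)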
#APIs for control
def moveByVelocityBodyFrameAsync(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in the X axis of the vehicle's local NED frame.
vy (float): desired velocity in the Y axis of the vehicle's local NED frame.
vz (float): desired velocity in the Z axis of the vehicle's local NED frame.
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocityBodyFrame', vx, vy, vz, duration, drivetrain, yaw_mode, vehicle_name)
def moveByVelocityZBodyFrameAsync(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in the X axis of the vehicle's local NED frame
vy (float): desired velocity in the Y axis of the vehicle's local NED frame
z (float): desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocityZBodyFrame', vx, vy, z, duration, drivetrain, yaw_mode, vehicle_name)
def moveByAngleZAsync(self, pitch, roll, z, yaw, duration, vehicle_name = ''):
logging.warning("moveByAngleZAsync API is deprecated, use moveByRollPitchYawZAsync() API instead")
return self.client.call_async('moveByRollPitchYawZ', roll, -pitch, -yaw, z, duration, vehicle_name)
def moveByAngleThrottleAsync(self, pitch, roll, throttle, yaw_rate, duration, vehicle_name = ''):
logging.warning("moveByAngleThrottleAsync API is deprecated, use moveByRollPitchYawrateThrottleAsync() API instead")
return self.client.call_async('moveByRollPitchYawrateThrottle', roll, -pitch, -yaw_rate, throttle, duration, vehicle_name)
def moveByVelocityAsync(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in world (NED) X axis
vy (float): desired velocity in world (NED) Y axis
vz (float): desired velocity in world (NED) Z axis
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode, vehicle_name)
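# Example (illustrative sketch, not part of the original client): command 2 m/s forward
# in the world NED frame for 3 seconds while the vehicle yaws to face its direction of
# travel. Assumes the client is connected, API control is enabled and the vehicle is airborne.
#
#   client.moveByVelocityAsync(2.0, 0.0, 0.0, duration=3.0,
#                              drivetrain=DrivetrainType.ForwardOnly,
#                              yaw_mode=YawMode(is_rate=False, yaw_or_rate=0.0)).join()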
def moveByVelocityZAsync(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
return self.client.call_async('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode, vehicle_name)
def moveOnPathAsync(self, path, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveOnPath', path, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToPositionAsync(self, x, y, z, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToPosition', x, y, z, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToGPSAsync(self, latitude, longitude, altitude, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToGPS', latitude, longitude, altitude, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToZAsync(self, z, velocity, timeout_sec = 3e+38, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToZ', z, velocity, timeout_sec, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveByManualAsync(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
- Read current RC state and use it to control the vehicles.
The parameters set up constraints on velocity and minimum altitude while flying. If the RC state is detected to violate these constraints,
that RC state is ignored.
Args:
vx_max (float): max velocity allowed in x direction
vy_max (float): max velocity allowed in y direction
z_min (float): min z allowed for vehicle position
duration (float): after this duration vehicle would switch back to non-manual mode
drivetrain (DrivetrainType): when ForwardOnly, vehicle rotates itself so that its front is always facing the direction of travel. If MaxDegreeOfFreedom then it doesn't do that (crab-like movement)
yaw_mode (YawMode): Specifies if vehicle should face at given angle (is_rate=False) or should be rotating around its axis at given rate (is_rate=True)
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode, vehicle_name)
def rotateToYawAsync(self, yaw, timeout_sec = 3e+38, margin = 5, vehicle_name = ''):
return self.client.call_async('rotateToYaw', yaw, timeout_sec, margin, vehicle_name)
def rotateByYawRateAsync(self, yaw_rate, duration, vehicle_name = ''):
return self.client.call_async('rotateByYawRate', yaw_rate, duration, vehicle_name)
def hoverAsync(self, vehicle_name = ''):
return self.client.call_async('hover', vehicle_name)
def moveByRC(self, rcdata = RCData(), vehicle_name = ''):
return self.client.call('moveByRC', rcdata, vehicle_name)
# Low-level control APIs
def moveByMotorPWMsAsync(self, front_right_pwm, rear_left_pwm, front_left_pwm, rear_right_pwm, duration, vehicle_name = ''):
"""
- Directly control the motors using PWM values
Args:
front_right_pwm (float): PWM value for the front right motor (between 0.0 to 1.0)
rear_left_pwm (float): PWM value for the rear left motor (between 0.0 to 1.0)
front_left_pwm (float): PWM value for the front left motor (between 0.0 to 1.0)
rear_right_pwm (float): PWM value for the rear right motor (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByMotorPWMs', front_right_pwm, rear_left_pwm, front_left_pwm, rear_right_pwm, duration, vehicle_name)
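# Example (illustrative, not part of the original client): spin all four motors at 60% PWM
# for one second. Equal PWM on all motors roughly holds attitude; the resulting thrust
# depends on the vehicle model, so treat this as a sketch rather than a calibrated command.
#
#   client.moveByMotorPWMsAsync(0.6, 0.6, 0.6, 0.6, duration=1.0).join()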
def moveByRollPitchYawZAsync(self, roll, pitch, yaw, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
- Roll angle, pitch angle, and yaw angle set points are given in **radians**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
yaw (float): Desired yaw angle, in radians.
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawZ', roll, -pitch, -yaw, z, duration, vehicle_name)
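# Example (illustrative, not part of the original client): hold a 0.1 rad roll (which, per
# the FLU convention above, translates the vehicle to the right) at 2 m above the start
# point for 2 seconds. z is in the local NED frame, so "up" is negative.
#
#   client.moveByRollPitchYawZAsync(roll=0.1, pitch=0.0, yaw=0.0, z=-2.0, duration=2.0).join()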
def moveByRollPitchYawThrottleAsync(self, roll, pitch, yaw, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
- Roll angle, pitch angle, and yaw angle are given in **degrees** when using PX4 and in **radians** when using SimpleFlight, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle.
pitch (float): Desired pitch angle.
yaw (float): Desired yaw angle.
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawThrottle', roll, -pitch, -yaw, throttle, duration, vehicle_name)
def moveByRollPitchYawrateThrottleAsync(self, roll, pitch, yaw_rate, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
- Roll angle, pitch angle, and yaw rate set points are given in **radians**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
yaw_rate (float): Desired yaw rate, in radian per second.
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawrateThrottle', roll, -pitch, -yaw_rate, throttle, duration, vehicle_name)
def moveByRollPitchYawrateZAsync(self, roll, pitch, yaw_rate, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
- Roll angle, pitch angle, and yaw rate set points are given in **radians**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
yaw_rate (float): Desired yaw rate, in radian per second.
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawrateZ', roll, -pitch, -yaw_rate, z, duration, vehicle_name)
def moveByAngleRatesZAsync(self, roll_rate, pitch_rate, yaw_rate, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
- Roll rate, pitch rate, and yaw rate set points are given in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll_rate (float): Desired roll rate, in radians / second
pitch_rate (float): Desired pitch rate, in radians / second
yaw_rate (float): Desired yaw rate, in radians / second
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByAngleRatesZ', roll_rate, -pitch_rate, -yaw_rate, z, duration, vehicle_name)
def moveByAngleRatesThrottleAsync(self, roll_rate, pitch_rate, yaw_rate, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
- Roll rate, pitch rate, and yaw rate set points are given in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
| Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll_rate (float): Desired roll rate, in radians / second
pitch_rate (float): Desired pitch rate, in radians / second
yaw_rate (float): Desired yaw rate, in radians / second
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByAngleRatesThrottle', roll_rate, -pitch_rate, -yaw_rate, throttle, duration, vehicle_name)
def setAngleRateControllerGains(self, angle_rate_gains=AngleRateControllerGains(), vehicle_name = ''):
"""
- Modifying these gains will have an effect on *ALL* move*() APIs.
This is because any velocity setpoint is converted to an angle-level setpoint, which is tracked with an angle-level controller.
That angle-level setpoint is itself tracked with an angle rate controller.
- This function should only be called if the default angle rate control PID gains need to be modified.
Args:
angle_rate_gains (AngleRateControllerGains):
- Correspond to the roll, pitch, yaw axes, defined in the body frame.
- Pass AngleRateControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setAngleRateControllerGains', *(angle_rate_gains.to_lists()+(vehicle_name,)))
def setAngleLevelControllerGains(self, angle_level_gains=AngleLevelControllerGains(), vehicle_name = ''):
"""
- Sets angle level controller gains (used by any API setting angle references - for ex: moveByRollPitchYawZAsync(), moveByRollPitchYawThrottleAsync(), etc)
- Modifying these gains will also affect the behaviour of moveByVelocityAsync() API.
This is because the AirSim flight controller will track velocity setpoints by converting them to angle set points.
- This function should only be called if the default angle level control PID gains need to be modified.
- Passing AngleLevelControllerGains() sets gains to default airsim values.
Args:
angle_level_gains (AngleLevelControllerGains):
- Correspond to the roll, pitch, yaw axes, defined in the body frame.
- Pass AngleLevelControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setAngleLevelControllerGains', *(angle_level_gains.to_lists()+(vehicle_name,)))
def setVelocityControllerGains(self, velocity_gains=VelocityControllerGains(), vehicle_name = ''):
"""
- Sets velocity controller gains for moveByVelocityAsync().
- This function should only be called if the default velocity control PID gains need to be modified.
- Passing VelocityControllerGains() sets gains to default airsim values.
Args:
velocity_gains (VelocityControllerGains):
- Correspond to the world X, Y, Z axes.
- Pass VelocityControllerGains() to reset gains to default recommended values.
- Modifying velocity controller gains will have an effect on the behaviour of moveOnSplineAsync() and moveOnSplineVelConstraintsAsync(), as they both use velocity control to track the trajectory.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setVelocityControllerGains', *(velocity_gains.to_lists()+(vehicle_name,)))
def setPositionControllerGains(self, position_gains=PositionControllerGains(), vehicle_name = ''):
"""
Sets position controller gains for position control APIs (e.g. moveToPositionAsync()).
This function should only be called if the default position control PID gains need to be modified.
Args:
position_gains (PositionControllerGains):
- Correspond to the X, Y, Z axes.
- Pass PositionControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setPositionControllerGains', *(position_gains.to_lists()+(vehicle_name,)))
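# Example (illustrative, not part of the original client): reset all controller gains to
# their default recommended values for a vehicle named 'Drone1'. Passing freshly
# constructed gain objects restores the defaults, as described in the docstrings above;
# the vehicle name is an assumption.
#
#   client.setAngleRateControllerGains(AngleRateControllerGains(), vehicle_name='Drone1')
#   client.setAngleLevelControllerGains(AngleLevelControllerGains(), vehicle_name='Drone1')
#   client.setVelocityControllerGains(VelocityControllerGains(), vehicle_name='Drone1')
#   client.setPositionControllerGains(PositionControllerGains(), vehicle_name='Drone1')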
#query vehicle state
def getMultirotorState(self, vehicle_name = ''):
"""
The position inside the returned MultirotorState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Vehicle to get the state of
Returns:
MultirotorState:
"""
return MultirotorState.from_msgpack(self.client.call('getMultirotorState', vehicle_name))
getMultirotorState.__annotations__ = {'return': MultirotorState}
#query rotor states
def getRotorStates(self, vehicle_name = ''):
"""
Used to obtain the current state of all of a multirotor's rotors. The state includes the speeds,
thrusts and torques for all rotors.
Args:
vehicle_name (str, optional): Vehicle to get the rotor state of
Returns:
RotorStates: Containing a timestamp and the speed, thrust and torque of all rotors.
"""
return RotorStates.from_msgpack(self.client.call('getRotorStates', vehicle_name))
getRotorStates.__annotations__ = {'return': RotorStates}
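# Example (illustrative, not part of the original client): query the vehicle's kinematic
# state and its rotor states. The kinematics fields are used elsewhere in this repo;
# printing the RotorStates object as a whole avoids assuming its field names.
#
#   state = client.getMultirotorState()
#   print(state.kinematics_estimated.position, state.kinematics_estimated.linear_velocity)
#   rotors = client.getRotorStates()
#   print(rotors)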
#----------------------------------- Car APIs ---------------------------------------------
class CarClient(VehicleClient, object):
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
super(CarClient, self).__init__(ip, port, timeout_value)
def setCarControls(self, controls, vehicle_name = ''):
"""
Control the car using throttle, steering, brake, etc.
Args:
controls (CarControls): Struct containing control values
vehicle_name (str, optional): Name of vehicle to be controlled
"""
self.client.call('setCarControls', controls, vehicle_name)
def getCarState(self, vehicle_name = ''):
"""
The position inside the returned CarState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of vehicle
Returns:
CarState:
"""
state_raw = self.client.call('getCarState', vehicle_name)
return CarState.from_msgpack(state_raw)
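# Example (illustrative sketch, not part of the original client): drive the car forward
# for a moment. The CarControls fields (throttle, steering, brake) are assumed from the
# setCarControls docstring above.
#
#   car_client = CarClient()
#   car_client.confirmConnection()
#   car_client.enableApiControl(True)
#   controls = CarControls()
#   controls.throttle = 0.5
#   controls.steering = 0.0
#   car_client.setCarControls(controls)
#   print(car_client.getCarState())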
def getCarControls(self, vehicle_name=''):
"""
Args:
vehicle_name (str, optional): Name of vehicle
Returns:
CarControls:
"""
controls_raw = self.client.call('getCarControls', vehicle_name)
return CarControls.from_msgpack(controls_raw) | 76,649 | Python | 46.285626 | 211 | 0.64434 |
superboySB/SBDrone_deprecated/src/HITL/toturials/deprecated/flightmare/README.md | # SBDrone (Flightmare)
Use sim-to-real RL to achieve a perception-aware velocity controller. These are notes for running the code on x86_64 machines.
# Configure the environment
## Install dependencies
```sh
sudo apt-get update && sudo apt-get install -y --no-install-recommends build-essential cmake libzmqpp-dev libopencv-dev libgoogle-glog-dev protobuf-compiler ros-$ROS_DISTRO-octomap-msgs ros-$ROS_DISTRO-octomap-ros ros-$ROS_DISTRO-joy python3-vcstool python-catkin-tools git python3-pip lsb-core vim gedit locate wget desktop-file-utils python3-empy gcc g++ cmake git gnuplot doxygen graphviz software-properties-common apt-transport-https curl libqglviewer-dev-qt5 libzmqpp-dev libeigen3-dev libglfw3-dev libglm-dev libvulkan1 vulkan-utils gdb libsdl-image1.2-dev libsdl-dev ros-melodic-octomap-mapping libomp-dev libompl-dev ompl-demos && curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - && sudo add-apt-repository "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" && sudo apt update && sudo apt install code -y && sudo pip3 install catkin-tools numpy -i https://pypi.tuna.tsinghua.edu.cn/simple
```
## Install Open3D
```sh
tar -C ~/ -zxvf ~/dependencies/Open3D.tgz && cd ~/Open3D/ && util/scripts/install-deps-ubuntu.sh assume-yes && mkdir build && cd build && cmake -DBUILD_SHARED_LIBS=ON .. && make -j16 && sudo make install
```
## Install cv_bridge
```sh
mkdir -p ~/cv_bridge_ws/src && tar -C ~/cv_bridge_ws/src/ -zxvf ~/dependencies/vision_opencv.tgz && apt-cache show ros-melodic-cv-bridge | grep Version && cd ~/cv_bridge_ws/ && catkin config --install && catkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so && catkin build && source install/setup.bash --extend
```
## Compile our project
**Every time you change the code on another machine**, you can delete the project and start over by:
```sh
cd ~ && git clone https://github.com/superboySB/flightmare_ws.git
```
```sh
echo "export FLIGHTMARE_PATH=~/flightmare_ws/src/flightmare" >> ~/.bashrc && source ~/.bashrc
```
Download the Flightmare Unity binary **RPG_Flightmare.tar.xz** for rendering from the [Releases](https://github.com/uzh-rpg/flightmare/releases) and extract it into /home/qiyuan/flightmare_ws/src/flightmare/flightrender/
```sh
cd ~/flightmare_ws/ && catkin init && catkin config --extend /opt/ros/melodic && catkin config --merge-devel && catkin config --cmake-args -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=-fdiagnostics-color && catkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so && catkin build
```
## Install Python Package: flightlib + flightrl
flightlib
```sh
sudo pip3 install --upgrade pip && pip3 install tensorflow-gpu==1.14 markupsafe scikit-build -i https://pypi.tuna.tsinghua.edu.cn/simple && cd ~/flightmare_ws/src/flightmare/flightlib && pip3 install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
```
flightrl (main)
```sh
cd ~/flightmare_ws/src/flightmare/flightrl && pip3 install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
```
# Basic Usage with ROS
## Launch Flightmare (use gazebo-based dynamics)
In this example, we show how to use [RotorS](https://github.com/ethz-asl/rotors_simulator) for quadrotor dynamics modelling, [rpg_quadrotor_control](https://github.com/uzh-rpg/rpg_quadrotor_control) for a model-based controller, and Flightmare for image rendering.
```sh
cd ~/flightmare_ws && source ./devel/setup.bash && roslaunch flightros rotors_gazebo.launch
```
We hope this example can serve as a starting point for many other applications. For example, Flightmare can be used with other multirotor models that come with RotorS, such as the AscTec Hummingbird, the AscTec Pelican, or the AscTec Firefly. The default controller in [rpg_quadrotor_control](https://github.com/uzh-rpg/rpg_quadrotor_control) is a PID controller. Users also have the option to use a more advanced controller in this framework, such as [Perception-Aware Model Predictive Control](https://github.com/uzh-rpg/rpg_mpc).
# Basic Usage with Python
## Train neural network controller using PPO
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 1
```
## Test a pre-trained neural network controller
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 0
```
## With Unity Rendering
To enable Unity for visualization, launch the extracted executable RPG_Flightmare.x86_64:
```sh
~/flightmare_ws/src/flightmare/flightrender/RPG_Flightmare.x86_64
```
and then test a pre-trained controller
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 0 --render 1
```
| 5,099 | Markdown | 59.714285 | 954 | 0.751324 |
superboySB/SBDrone_deprecated/src/HITL/toturials/deprecated/use_mavros/README.md | # Tutorials of mavros and PX4
How to control a PX4 drone in AirSim by sending MAVROS topics.
## Install mavros from source
Building from source is the same as in the single-drone tutorial: compile inside the "build" container first, then start the "runtime" container as follows:
```sh
docker run -itd --privileged --env=LOCAL_USER_ID="$(id -u)" --env=PX4_SIM_HOST_ADDR=172.16.13.104 -v /home/wangchao/daizipeng/SBDrone:/src:rw -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=:0 --network=host --name=mypx4-0 mypx4_image:v1 /bin/bash
```
Here, `--env=PX4_SIM_HOST_ADDR=172.16.13.104` adds the `PX4_SIM_HOST_ADDR` environment variable to the container, pointing at the remote AirSim host; the value after `--name` is the container name.
## Start the mavros services step by step
On the Windows machine, first check AirSim's settings.json, launch one of the AirSim maps, and leave it waiting for connections. Then log into the container:
```sh
docker exec -it --user $(id -u) mypx4-0 /bin/bash
```
Open a terminal and run two PX4 instances; you should see QGC (GPS lock) messages in AirSim before considering this step successful:
```sh
bash /src/Scripts/run_airsim_sitl.sh 0
bash /src/Scripts/run_airsim_sitl.sh 1
```
Note that every time you use ROS-related commands you first need to run:
```sh
source /opt/ros/melodic/setup.bash
```
Open another terminal and start the mavros services. The first port is the receiving port on the local host (127.0.0.1) (udp_onboard_payload_port_remote); the second port is the sending port on the flight controller (udp_onboard_payload_port_local). These can be matched against the MAVLink onboard UDP ports printed in the logs of the previous terminal.
```sh
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14280
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14281
```
## Manually controlling the PX4 drone in AirSim via mavros topics (somewhat limited to version v1.12.1)
Following this [tutorial](https://www.youtube.com/watch?v=ZonkdMcwXH4), open a terminal and send service calls to PX4 through mavros to control the drone. The commands below are meant to be tried in order:
```sh
# Request takeoff; the vehicle cannot take off yet
rosservice call /mavros/cmd/takeoff "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
# Arm the drone; now it can take off
rosservice call /mavros/cmd/arming "value: true"
# Take off
rosservice call /mavros/cmd/takeoff "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
# Land the drone
rosservice call /mavros/cmd/land "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
```
You can also publish mavros topics to PX4. The following runs a position controller from one terminal:
```sh
# Publish position controller setpoints
rostopic pub /mavros/setpoint_position/local geometry_msgs/PoseStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
pose:
position:
x: 1.0
y: 0.0
z: 2.0
orientation:
x: 0.0
y: 0.0
z: 0.0
w: 0.0" -r 20
```
Then, in another terminal, set the flight mode:
```sh
# This service switches the flight controller (e.g. PX4) to a specific flight mode. Here we use 'OFFBOARD' mode, which lets the flight controller accept commands from an external computer.
rosservice call /mavros/set_mode "base_mode: 0
custom_mode: 'OFFBOARD'"
# Arm the drone so the commands are executed
rosservice call /mavros/cmd/arming "value: true"
# You can keep publishing other position controller setpoints
```
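For reference, the same OFFBOARD position-control loop can be scripted with rospy instead of typing topics and services by hand. This is a minimal sketch (not part of the original tutorial); it assumes mavros is already running against one PX4 instance and uses the standard mavros topic and service names shown above. The node name and setpoint values are placeholders.
```python
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from mavros_msgs.srv import CommandBool, SetMode

rospy.init_node('offboard_position_demo')
pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)
arming = rospy.ServiceProxy('/mavros/cmd/arming', CommandBool)
set_mode = rospy.ServiceProxy('/mavros/set_mode', SetMode)

target = PoseStamped()
target.pose.position.x, target.pose.position.y, target.pose.position.z = 1.0, 0.0, 2.0

rate = rospy.Rate(20)
# PX4 requires a stream of setpoints before it will accept OFFBOARD mode
for _ in range(100):
    pub.publish(target)
    rate.sleep()

set_mode(base_mode=0, custom_mode='OFFBOARD')
arming(True)

# Keep streaming the setpoint so PX4 stays in OFFBOARD mode
while not rospy.is_shutdown():
    pub.publish(target)
    rate.sleep()
```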
The following is a circle-flying demo using the velocity controller:
```sh
rostopic pub /mavros/setpoint_velocity/cmd_vel geometry_msgs/TwistStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
twist:
linear:
x: 1.0
y: 0.0
z: 0.0
angular:
x: 0.0
y: 0.0
z: 1.0" -r 20
```
| 2,690 | Markdown | 26.742268 | 247 | 0.693309 |
superboySB/SBDrone_deprecated/src/HITL/toturials/2_rl_single_px4_drone/README.md | # Notes for re-implementing paper "PRL4AirSim"
An attempt to do reinforcement learning with PX4 in the loop, starting from a single drone.
## Requirements
Set up the required environment:
```sh
docker build -t mypx4_image:v1 .
docker run -itd --privileged -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all --user=user --env=PX4_SIM_HOST_ADDR=172.16.13.104 --network=host --name=mypx4-dev mypx4_image:v1 /bin/bash
docker exec -it --user=user mypx4-dev /bin/bash
git clone https://github.com/superboySB/SBDrone && cd SBDrone && pip install -r requirements.txt
```
```sh
bash /home/user/PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 1
```
```sh
/home/user/PX4-Autopilot/build/px4_sitl_default/bin/px4 -i 0 -d /home/user/PX4-Autopilot/build/px4_sitl_default/etc >out.log 2>err.log &
```
## Troubleshooting
### 1. If `docker pull` fails, use a machine with a better network connection and transfer the image manually
```sh
docker save > <image-name>.tar <repository>:<tag>
docker load < <image-name>.tar
```
### 2. Changing the AirSim screen resolution
https://blog.csdn.net/qq_33727884/article/details/89487292
### 3. Commands suggested by Jianfei (建飞老师)
```sh
mavlink status
listener manual_control_setpoint -r 10
listener input_rc
``` | 1,038 | Markdown | 24.974999 | 202 | 0.725434 |
superboySB/SBDrone_deprecated/src/HITL/toturials/3_rl_multiple_px4_drones/README.md | # Notes for re-implementing paper "PRL4AirSim"
Reproducing the paper PRL4AirSim.
## Requirements
The binary shipped with the original paper was built from a Windows editor project, but only a Linux build is provided open source, so for now the whole project assumes a Linux machine as the host machine.
## Install
```sh
docker build -t mypx4_image:v1 .
docker run -itd --privileged -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all --user=user --env=PX4_SIM_HOST_ADDR=172.16.13.104 --network=host --name=mypx4-dev mypx4_image:v1 /bin/bash
docker exec -it --user=user mypx4-dev /bin/bash
bash PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 2
cd PRL4AirSim && pip install -r requirements.txt
```
## Troubleshooting
### 1. If `docker pull` fails, use a machine with a better network connection and transfer the image manually
```sh
docker save > <image-name>.tar <repository>:<tag>
docker load < <image-name>.tar
```
### 2. If you use the original AirSim, UE4.27 may fail to run the Blocks example
| 782 | Markdown | 22.727272 | 202 | 0.742967 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/PyClient.py | import Utils as Utils
import DQNTrainer as DQNTrainer
import datetime
import time
import Simulation as Simulation
import argparse
if __name__ == "__main__":
"""
Model Server port 29000
UE Server port 29001
"""
parser = argparse.ArgumentParser(description="PyClient",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("UE_Port")
parser.add_argument("UE_Address")
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
trainer_ip_address = '127.0.0.1' #os.environ['BUFFER_SERVER_IP']
#trainer_port = int(29000) #int(os.environ['BUFFER_SERVER_PORT'])
storage_port = int(arguments["storage_port"])
ue_ip_address = arguments["UE_Address"] #os.environ['UE_SERVER_IP']
#ue_ip_address = str(arguments["IP_Address"])
ue_port = int(arguments["UE_Port"]) #int(os.environ['UE_SERVER_PORT'])
client, model_server = Utils.connectClient(trainer_ip_address=trainer_ip_address, ue_ip_address=ue_ip_address, trainer_port=storage_port, ue_port=ue_port)
times = []
## Setup Environment
image_shape = (2, 32, 32)
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
print("start time: ", current_time)
agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
#print("loaded best model")
#agent.load('{}/BestModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
run_name = now.strftime("%Y_%m_%d_%Hh%Mm%Ss")
simulation = Simulation.Sim(image_shape=Utils.getConfig()['state_space'], num_drones=Utils.getConfig()['num_drones'])
train = Utils.getConfig()['from_artifact'] == ''
start = (time.perf_counter() / 3600)
Utils.getModelServer().call("startSimulation")
while simulation.episodes < Utils.getConfig()['max_episodes']:
finished = simulation.tick(agent)
end = datetime.datetime.now()
current_time = end.strftime("%H:%M:%S")
print("End time: ", current_time)
| 2,618 | Python | 38.681818 | 158 | 0.612299 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Storage.py | import msgpackrpc #install as admin: pip install msgpack-rpc-python
#import distributed.model.DQNTrainer as DQNTrainer
#https://linuxtut.com/en/70b626ca3ac6fbcdf939/
import numpy as np
import torch
import pathlib
import wandb
import DQNTrainer as DQNTrainer
import datetime
import time
import Utils as Utils
from collections import deque
import ReplayMemory as ReplayMemory
class Storage(object):
def __init__(self):
self.run_name = datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
self.run = wandb.init(
project="drone",
config=Utils.getConfig(),
name=self.run_name,
)
self.total_episodes = 0
self.start_time = None
self.agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
self.start_time = time.perf_counter()
def pushMemory(self, state, action, next_state, reward, not_done):
self.agent.memory.push(Utils.convertStateDicToNumpyDic(state), action, Utils.convertStateDicToNumpyDic(next_state), reward, not_done)
if (len(self.agent.memory) % 100 == 0):
wandb.log({"metric/Observations" : self.agent.memory.pushCounter},
step=self.total_episodes)
if not len(self.agent.memory) == self.agent.memory.maxSize:
print(len(self.agent.memory))
def getMemoryPushCounter(self):
return self.agent.memory.pushCounter
def startSimulation(self):
self.start_time = (time.perf_counter() / 3600)
wandb.log({"metric/HoursRun" : 0,
"metric/Observations" : self.agent.memory.pushCounter},
step=self.total_episodes)
print("============ START SIMULATION ===========")
def getEpsilon(self):
return self.agent.epsilon
def finishEpisode(self, finalDistance, totalReward):
self.total_episodes += 1
self.agent.decrement_epsilon()
wandb.log({
"metric/Distance From Goal": finalDistance,
"metric/Total Reward" : totalReward,
"metric/Wall-Time /h" : (time.perf_counter()-self.start_time) / 3600.0,
"metric/Epsilon" : self.agent.epsilon
}, step=self.total_episodes)
if self.total_episodes % 1000 == 0 and self.total_episodes != 0:
print("saving model parameters in wandb")
artifact = wandb.Artifact('dqn_3D_{}_EP_{}'.format(self.run_name, self.total_episodes), type='model', description='Episode {}'.format(self.total_episodes))
artifact.add_file('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
self.run.log_artifact(artifact)
def setNetworkTrainIteration(self, trainIteration):
wandb.log({
"metric/Train Iteration": trainIteration
}, step=self.total_episodes)
def sampleFromStorage(self):
if len(self.agent.memory) >= self.agent.replayMemory_size or len(self.agent.memory) >= self.agent.batch_size:
sample = self.agent.memory.sample(self.agent.batch_size)
batch = ReplayMemory.Transition(*zip(*sample))
state = [Utils.convertStateDicToListDic(i) for i in batch.state]
action = [int(i) for i in batch.action]
next_state = [Utils.convertStateDicToListDic(i) for i in batch.next_state]
reward = [float(i) for i in batch.reward]
not_done = [int(i) for i in batch.not_done]
return state, \
action, \
next_state, \
reward, \
not_done
else:
return None, None, None, None, None
def confirmConnection(self):
return 'Storage Server Connected!'
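# Example (illustrative): how a remote process talks to this Storage server over
# msgpack-rpc, mirroring what PyClient.py and Trainer.py do. Port 29000 is the storage
# port used elsewhere in this repo; adjust it to your config.json.
#
#   client = msgpackrpc.Client(msgpackrpc.Address("127.0.0.1", 29000))
#   print(client.call("confirmConnection"))
#   client.call("pushMemory", state, action, next_state, reward, not_done)
#   state, action, next_state, reward, not_done = client.call("sampleFromStorage")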
def testSampleFromStorage():
storage_server = Storage()
for i in range(50):
storage_server.agent.memory.push({'image': np.zeros(shape=(32, 32)),
'position': np.zeros(shape=(3,))},
1,
{'image': np.zeros(shape=(32, 32)),
'position': np.zeros(shape=(3,))},
0.1,
1)
storage_server.sampleFromStorage()
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Storage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
storage_server = Storage()
server = msgpackrpc.Server(storage_server)
server.listen(msgpackrpc.Address("127.0.0.1", int(arguments["storage_port"])))
print("========== STARTING STORAGE SERVER ============")
server.start()
print("========== FINISH STORAGE SERVER ============")
storage_server.run.finish()
| 5,554 | Python | 41.083333 | 167 | 0.567699 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DQNTrainer.py | import numpy as np
import torch
#import distributed.distributed_22_06_02.ReplayMemory as ReplayMemory
#import distributed.distributed_22_06_02.model.DQNetwork as DQNetwork
import ReplayMemory as ReplayMemory
import DQNetwork as DQNetwork
class DQNTrainer(object):
def __init__(self, image_input_dims : np.array,
n_actions : int,
replayMemory_size : int,
batch_size : int,
learningRate : float = 0.01,
discount_factor : float = 0.99,
epsilon : float = 1.0,
replace_target_count_episode : int = 100,
save_model_count_episode : int = 250,
checkpoint_episode : int = 250,
checkpoint_file : str = 'model_saves/dqn',
number_dimensions : int = 2):
self.image_input_dims = image_input_dims
self.n_actions = n_actions
self.discount_factor = discount_factor
self.epsilon = epsilon
self.replace_target_count_episode = replace_target_count_episode
self.save_model_count_episode = save_model_count_episode
self.network = DQNetwork.DQNetwork(learningRate, self.n_actions, image_input_dims)
self.target_network = DQNetwork.DQNetwork(learningRate, self.n_actions, image_input_dims)
self.batch_size = batch_size
self.memory = ReplayMemory.ReplayMemory(replayMemory_size)
self.replayMemory_size = replayMemory_size
self.checkpoint_episode = checkpoint_episode
self.checkpoint_file = checkpoint_file
def load(self, state_dict):
self.network.load_state_dict(state_dict=torch.load(state_dict))
self.target_network.load_state_dict(state_dict=torch.load(state_dict))
print("Loaded from state dictionary")
# Epsilon Greedy action selection.
def choose_action(self, observation : dict):
maxValue = None
# Expecting (Batch, Channels, Height, Width)
image = torch.tensor(np.reshape(np.array(observation['image']), (1, *self.image_input_dims)), dtype=torch.float).to(self.network.device)
velocity = torch.tensor(np.array(observation['velocity']).reshape((1, 3)), dtype=torch.float).to(self.network.device)
actions = self.network.forward(image, velocity)
if np.random.random() > self.epsilon:
action = torch.argmax(actions).item()
else:
action = np.random.choice([i for i in range(self.n_actions)])
#action = torch.argmax(actions).item()
maxValue = torch.max(actions).item()
#self.decrement_epsilon()
return action, maxValue
def learn(self, transitions):
self.network.optimizer.zero_grad()
self.memory.pushCounter += 1
if self.memory.pushCounter % self.replace_target_count_episode == 0:
print("Transfer weights to target network at step {}".format(self.memory.pushCounter))
self.target_network.load_state_dict(self.network.state_dict())
batch = ReplayMemory.Transition(*zip(*transitions))
state = (torch.tensor(np.array([i[b'image'].reshape(*self.image_input_dims) for i in batch.state])).to(self.network.device).float(),
torch.tensor(np.array([i[b'velocity'] for i in batch.state])).to(self.network.device).float())
next_state = (torch.tensor(np.array([i[b'image'].reshape(*self.image_input_dims) for i in batch.next_state])).to(self.network.device).float(),
torch.tensor(np.array([i[b'velocity'] for i in batch.next_state])).to(self.network.device).float())
actions = torch.tensor(batch.action).to(self.network.device)
rewards = torch.tensor(batch.reward).to(self.network.device)
not_done = torch.tensor(batch.not_done).to(self.network.device)
indices = np.arange(self.batch_size)
# https://en.wikipedia.org/wiki/Q-learning
# Old quality value
Q_old = self.network.forward(*state)[indices, actions]
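# TD target of the Q-learning update referenced above:
#   Q_target = r + discount_factor * max_a' Q_target_net(s', a') * not_done
# so terminal transitions (not_done == 0) reduce to the immediate reward.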
Q_target = rewards + self.target_network.forward(*next_state).max(dim=1)[0] * self.discount_factor * not_done
loss = self.network.loss(Q_old.double(), Q_target.double()).to(self.network.device)
loss.backward()
self.network.optimizer.step()
def decrement_epsilon(self):
#if self.memory.pushCounter < self.replayMemory_size and self.memory.pushCounter > self.replayMemory_size * 0.2 * 0.99:
if self.memory.pushCounter > self.replayMemory_size:
self.epsilon = max(0, 1. - ((self.memory.pushCounter - self.replayMemory_size) / self.replayMemory_size)) | 4,616 | Python | 47.599999 | 150 | 0.647964 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Start.py | import os
import json
import pathlib
import time
import Utils
import subprocess
import atexit
homeDir = str(pathlib.Path.home())
projectName = Utils.getConfig()['projectName']
envProcesses = int(Utils.getConfig()['envProcesses'])
storage_port = int(Utils.getConfig()['storage_port'])
headless = bool(Utils.getConfig()['headless'])
def changeUEIPJson(port):
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "r") as jsonFile:
data = json.load(jsonFile)
data["ApiServerPort"] = port
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "w") as jsonFile:
json.dump(data, jsonFile, indent=4)
# os.system('gnome-terminal -- python Storage.py {}'.format(storage_port))
# time.sleep(5)
# os.system('gnome-terminal -- python Trainer.py {}'.format(storage_port))
storage_procress = subprocess.Popen(['python3','Storage.py',f"{storage_port}"],shell=False, bufsize=0)
atexit.register(storage_procress.terminate)
time.sleep(5)
trainer_procress = subprocess.Popen(['python3','Trainer.py',f"{storage_port}"],shell=False,bufsize=0)
atexit.register(trainer_procress.terminate)
for i in range(envProcesses):
port = storage_port + i + 1
changeUEIPJson(port)
if headless:
# os.system('gnome-terminal -- ./UEBinary/{projectName}.sh -RenderOffscreen -windowed -NoVSync'.format(projectName=projectName))
ue_procress = subprocess.Popen([f'./UEBinary/{projectName}.sh','-RenderOffscreen','-windowed','-NoVSync'],shell=False,bufsize=0)
atexit.register(ue_procress.terminate)
else:
windowX = 1000 * i
windowY = 1000
# os.system('gnome-terminal -- ./UEBinary/{projectName}.sh -windowed -WinX={WinX} -WinY={WinY} -NoVSync'.format(
# projectName=projectName,
# WinX=windowX,
# WinY=windowY))
ue_procress = subprocess.Popen([f'./UEBinary/{projectName}.sh','-windowed',f'-WinX={windowX}',f'-WinY={windowY}','-NoVSync'],shell=False,bufsize=0)
atexit.register(ue_procress.terminate)
time.sleep(4)
time.sleep(5)
for i in range(envProcesses):
UE_port = storage_port + i + 1
UE_Address = "127.0.0.1"
# os.system('gnome-terminal -- python PyClient.py {UE_port} {UE_Address} {storage_port}'.format(UE_port=UE_port, UE_Address="127.0.0.1", storage_port=storage_port))
agent_procress = subprocess.Popen(['python3','PyClient.py',f'{UE_port}',f'{UE_Address}',f'{storage_port}'],shell=False, bufsize=0)
atexit.register(agent_procress.terminate) | 2,533 | Python | 39.222222 | 168 | 0.686932 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Utils.py | import airsim
import numpy as np
import cv2 as cv
import msgpackrpc
import json
config = json.load(open("config.json", "r"))
print(config)
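# Example config.json (illustrative values only; the keys are the ones read across
# Utils.py, Start.py, PyClient.py and Storage.py — actual values may differ):
#
# {
#     "projectName": "Blocks",
#     "envProcesses": 1,
#     "storage_port": 29000,
#     "headless": false,
#     "num_drones": 4,
#     "state_space": [2, 32, 32],
#     "action_space": 2,
#     "buffer_Size": 10000,
#     "batch_size": 32,
#     "learning_rate": 0.001,
#     "discount_factor": 0.99,
#     "replace_target_count_episode": 100,
#     "max_episodes": 10000,
#     "from_artifact": ""
# }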
client = None
model_server = None
def connectClient(trainer_ip_address, ue_ip_address, trainer_port = 29000, ue_port = 41451):
global client, model_server
try:
client = airsim.MultirotorClient(ip=ue_ip_address, port=ue_port)
client.confirmConnection()
except Exception as e:
print("Cannot Connect to Multirotor Client, please ensure Unreal Engine is running with AirSim plugin")
print("Ip address = {} and port {}".format(ue_ip_address, ue_port))
print(e)
exit(1)
try:
model_server = msgpackrpc.Client(msgpackrpc.Address(trainer_ip_address, trainer_port))
print(model_server.call("confirmConnection"))
except Exception as e:
print("Cannot connect to the model server, please ")
print("Ip address = {} and port {}".format(trainer_ip_address, trainer_port))
print(e)
exit(1)
return client, model_server
def getClient() -> airsim.MultirotorClient:
return client
def getModelServer() -> msgpackrpc.Client:
return model_server
def getConfig():
return config
def convertStateDicToListDic(state):
listState = {}
for key in state:
listState[key] = state[key].tolist()
#print(listState)
return listState
def convertStateDicToNumpyDic(state):
listState = {}
for key in state:
listState[key] = np.array(state[key])
#print(listState)
return listState
# API call in AirSim can sometimes be broken depending on version, easier to call using RPC directly
def fixed_simGetImages(requests, vehicle_name = '', external : bool = False):
responses_raw = getClient().client.call('simGetImages', requests, vehicle_name, external)
return [airsim.ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def handleImage(droneName : str, cameraName : str, imageType : airsim.ImageType) -> np.array:
if (imageType == airsim.ImageType.Scene):
imageRequests = [airsim.ImageRequest(cameraName, imageType, False, False)]
imageResponses = fixed_simGetImages(imageRequests, droneName, False)
image1d = np.frombuffer(imageResponses[0].image_data_uint8, dtype=np.uint8)  # np.fromstring is deprecated for binary data
imageRGB = image1d.reshape((imageResponses[0].height, imageResponses[0].width, 3))
return imageRGB
elif (imageType == airsim.ImageType.DepthPlanar or imageType == airsim.ImageType.DepthVis or imageType == airsim.ImageType.DepthPerspective):
imageResponses = fixed_simGetImages([airsim.ImageRequest(cameraName, airsim.ImageType.DepthPlanar, True, True)], droneName, False)
imageDepth = airsim.list_to_2d_float_array(imageResponses[0].image_data_float, imageResponses[0].width, imageResponses[0].height)
return imageDepth
else:
print("NOT CODED THE HANDLING OF THIS IMAGE TYPE YET")
return np.array([])
def showRGBImage(droneName : str):
image = handleImage(droneName, 'scene_cam', airsim.ImageType.Scene)
cv.imshow("RGB image", image)
cv.waitKey(0)
def showDepthImage(droneName : str):
imageResponses = fixed_simGetImages([airsim.ImageRequest('depth_cam', airsim.ImageType.DepthPlanar, True, True)], droneName, False)
imageDepth = airsim.list_to_2d_float_array(imageResponses[0].image_data_float, imageResponses[0].width,
imageResponses[0].height)
cv.imshow("depth image", imageDepth)
cv.waitKey(0)
def convert_pos_UE_to_AS(origin_UE : np.array, pos_UE : np.array):
pos = np.zeros(3, dtype=float)
pos[0] = pos_UE[0] - origin_UE[0]
pos[1] = pos_UE[1] - origin_UE[1]
pos[2] = - pos_UE[2] + origin_UE[2]
return pos / 100
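# Worked example (illustrative): with origin_UE = [0.0, 0.0, 910.0] (as used in
# Simulation.py) and pos_UE = [9030.0, -6760.0, 1000.0], the result is
# [90.3, -67.6, -0.9] — UE centimetres become AirSim NED metres with Z flipped.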
| 3,819 | Python | 39.210526 | 145 | 0.686305 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DQNetwork.py | import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn.functional as functional
import numpy as np
class DQNetwork(nn.Module):
def __init__(self, learningRate: float, num_actions: int, image_input_dims: tuple):
super(DQNetwork, self).__init__()
self.learningRate = learningRate
self.num_actions = num_actions
self.image_input_dims = image_input_dims
self.maxpooling = nn.MaxPool2d((2, 2), stride=2)
self.image_conv1 = nn.Conv2d(image_input_dims[0], 16, kernel_size=(6, 6), stride=(2, 2))
self.image_conv2 = nn.Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
self.vel_fc1 = nn.Linear(3, 16)
conv_output_dim = self.calculate_conv_output_dims()
self.out_fc1 = nn.Linear(conv_output_dim + 16, 16)
self.out_fc2 = nn.Linear(16, num_actions)
self.optimizer = optim.RMSprop(self.parameters(), lr=learningRate)
self.loss = nn.MSELoss()
self.device = torch.device('cuda:0')
self.to(self.device)
def calculate_conv_output_dims(self):
state = torch.zeros(1, *self.image_input_dims).float()
print("inpute state :", state.size())
x = self.maxpooling(functional.relu(self.image_conv1(state)))
print("layer 1", x.size())
x = self.maxpooling(functional.relu(self.image_conv2(x)))
print("layer 2", x.size())
return int(np.prod(x.size()))
def forward(self, image : torch.tensor, velocity : torch.tensor):
image = self.maxpooling(functional.relu(self.image_conv1(image)))
image = self.maxpooling(functional.relu(self.image_conv2(image)))
image_flattened = image.view(image.size()[0], -1)
velocity = functional.relu(self.vel_fc1(velocity))
concatinated_tensor = torch.cat((image_flattened, velocity), 1)
x = functional.relu(self.out_fc1(concatinated_tensor))
x = self.out_fc2(x)
return x
def test(self):
print("Testing network")
image = torch.zeros(1, *self.image_input_dims).float().to(self.device)
velocity = torch.zeros((1, 3)).float().to(self.device)
print("Input shapes: [image]: {} [velocity]: {}".format(image.size(), velocity.size()))
output = self.forward(image, velocity)
print("Output: {}".format(output))
if __name__ == "__main__":
print("test")
model = DQNetwork(learningRate=0.001, num_actions=2, image_input_dims=(2, 64, 64))
print("total parameters: ", sum(p.numel() for p in model.parameters()))
print("total trainable parameters: ", sum(p.numel() for p in model.parameters() if p.requires_grad))
print("total data points: ", (10 * 32 * 5000) / 30)
model.test()
| 2,727 | Python | 38.536231 | 104 | 0.627429 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Trainer.py | import msgpackrpc #install as admin: pip install msgpack-rpc-python
#import distributed.model.DQNTrainer as DQNTrainer
#https://linuxtut.com/en/70b626ca3ac6fbcdf939/
import torch
import pathlib
import DQNTrainer as DQNTrainer
import datetime
import time
import Utils as Utils
from collections import deque
import ReplayMemory as ReplayMemory
import os
from os.path import exists
class Trainer(object):
def __init__(self):
self.total_episodes = 0
self.start_time = None
self.agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
def confirmConnection(self):
return 'Model Server Connected!'
def learn(self):
return
def saveModel(self):
return
def testSampleFromStorageTrainer():
import Storage
import numpy as np
storage_server = Storage.Storage()
for i in range(50):
storage_server.agent.memory.push({'image': np.zeros(shape=(2, 32, 32)),
'velocity': np.zeros(shape=(3,))},
1,
{'image': np.zeros(shape=(2, 32, 32)),
'velocity': np.zeros(shape=(3,))},
0.1,
1)
state, action, next_state, reward, not_done = storage_server.sampleFromStorage()
transitions = []
for i in range(len(state)):
transition = ReplayMemory.Transition(Utils.convertStateDicToNumpyDic(state[i]),
action[i],
Utils.convertStateDicToNumpyDic(next_state[i]),
reward[i],
not_done[i])
transitions.append(transition)
trainer = Trainer()
trainer.agent.learn(transitions)
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Storage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
run_tests = False
if run_tests:
testSampleFromStorageTrainer()
print("========== STARTING TRAINING CLIENT ============")
trainer = Trainer()
try:
model_server = msgpackrpc.Client(msgpackrpc.Address("127.0.0.1", int(arguments["storage_port"])))
print(model_server.call("confirmConnection"))
except Exception as e:
print("Cannot connect to the model server, please ")
print("Ip address = {} and port {}".format("127.0.0.1", int(arguments["storage_port"])))
print(e)
exit(1)
trainIteration = 0
previous_time = time.perf_counter()
while True:
state, action, next_state, reward, not_done = model_server.call("sampleFromStorage")
if state == None:
print("Waiting for transitions")
time.sleep(2)
else:
transitions = []
for i in range(len(state)):
transition = ReplayMemory.Transition(Utils.convertStateDicToNumpyDic(state[i]),
action[i],
Utils.convertStateDicToNumpyDic(next_state[i]),
reward[i],
not_done[i])
transitions.append(transition)
trainer.agent.learn(transitions)
trainIteration += 1
if trainIteration % 200 == 0:
model_server.call("setNetworkTrainIteration", trainIteration)
print("Saving model")
#torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
print("train iteration ", trainIteration, time.perf_counter() - previous_time)
if exists('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve())):
os.rename('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
os.rename('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
else:
torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
previous_time = time.perf_counter()
| 5,394 | Python | 43.221311 | 150 | 0.531516 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Start_UEEditor.py | import os
import json
import pathlib
import time
import Utils
UEEditor_port = 29001
storage_port = 29000
def changeUEIPJson(port):
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "r") as jsonFile:
data = json.load(jsonFile)
data["ApiServerPort"] = port
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "w") as jsonFile:
json.dump(data, jsonFile, indent=4)
changeUEIPJson(UEEditor_port)
os.system('gnome-terminal -- python Storage.py {}'.format(storage_port))
time.sleep(10)
os.system('gnome-terminal -- python PyClient.py {UE_port} {UE_Address} {storage_port}'.format(UE_port=UEEditor_port, UE_Address="127.0.0.1", storage_port=storage_port))
os.system('gnome-terminal -- python Trainer.py {}'.format(storage_port)) | 807 | Python | 30.076922 | 168 | 0.714994 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Simulation.py | import Utils as Utils
import airsim
import numpy as np
import time
import DroneObj as DroneObj
import random
import argparse
from os.path import exists
import os
import pathlib
beforeTime = None
afterTime = None
class Sim(object):
def __init__(self, image_shape, num_drones):
self.image_shape = image_shape
self.origin_UE = np.array([0.0, 0.0, 910.0])
self.areans_train_long = np.array([
# Using larger environment
#[Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([41156.0, 20459.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([56206.0, 21019.0, 1000.0]))]
# Using smaller environment
[Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([9030.0, -6760.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([14060.0, -6760.0, 1000.0]))]
])
self.areans = self.areans_train_long
self.droneObjects = [DroneObj.DroneObject(i) for i in range(num_drones)]
self.episodes = 0
self.model_download_at_episode = 0
self.numImagesSent = 0
#TODO: HyperParameters
self.step_length = 0.25
self.constant_x_vel = 1.0
self.constant_z_pos = Utils.convert_pos_UE_to_AS(origin_UE=self.origin_UE, pos_UE=[8600.0, -4160.0, 1510.0])[2]
self.actionTime = 1.0
self.resetBatch()
def gatherAllObservations(self):
useNewMethod = True
nonResetingDrones = []
for droneObject in self.droneObjects:
if droneObject.reseting == False:
nonResetingDrones.append(droneObject)
if len(nonResetingDrones) == 0:
return
if useNewMethod:
requests = [airsim.ImageRequest('depth_cam_{}'.format(droneObject.droneId), airsim.ImageType.DepthPlanar, True, True) for droneObject in nonResetingDrones]
names = [droneObject.droneName for droneObject in nonResetingDrones]
beforeTime = time.perf_counter()
responses_raw = Utils.getClient().client.call('simGetBatchImages', requests, names)
afterTime = time.perf_counter()
print("Gather images: ", afterTime - beforeTime)
responses = [airsim.ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
imageDepths = [airsim.list_to_2d_float_array(responses[i].image_data_float, responses[i].width, responses[i].height) for i in range(len(responses))]
else:
beforeTime = time.perf_counter()
responses_raw = [Utils.getClient().client.call('simGetImages',
[airsim.ImageRequest('depth_cam_{}'.format(droneObject.droneId), airsim.ImageType.DepthPlanar, True, True)],
'Drone{}'.format(droneObject.droneId),
False) for droneObject in nonResetingDrones]
afterTime = time.perf_counter()
print("Gather images (old method): ", afterTime - beforeTime)
responses = [airsim.ImageResponse.from_msgpack(response_raw[0]) for response_raw in responses_raw]
imageDepths = [airsim.list_to_2d_float_array(responses[i].image_data_float, responses[i].width, responses[i].height) for i in range(len(responses))]
for i, droneObject in enumerate(nonResetingDrones):
imageDepth = imageDepths[i]
if (imageDepth.size == 0):
print("Image size is 0")
imageDepth = np.ones(shape=(self.image_shape[1], self.image_shape[2])) * 30
maxDistance = 50
imageDepth[imageDepth > maxDistance] = maxDistance
imageDepth = imageDepth.astype(np.uint8)
if droneObject.currentStep == 0:
droneObject.previous_depth_image = imageDepth
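            # Stack the current and previous depth frames into a 2-frame observation (a cheap
            # way to expose motion to the network); on the very first step both slots hold the
            # same image because there is no previous frame yet.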
stacked_images = np.array([imageDepth, droneObject.previous_depth_image])
multirotorState = Utils.getClient().getMultirotorState(droneObject.droneName)
velocity = multirotorState.kinematics_estimated.linear_velocity.to_numpy_array()
droneObject.previous_depth_image = imageDepth
droneObject.previousState = droneObject.currentState
droneObject.currentState = {'image': stacked_images, 'velocity': velocity}
droneObject.currentStatePos = multirotorState.kinematics_estimated.position.to_numpy_array()
def doActionBatch(self):
droneNames = []
vx_vec = []
vy_vec = []
z_vec = []
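        # Discrete action encoding used below: action 0 nudges the lateral velocity by
        # +step_length, action 1 by -step_length, anything else keeps it unchanged; forward
        # speed and altitude are held constant for every drone.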
for droneObject in self.droneObjects:
droneNames.append(droneObject.droneName)
quad_vel = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.linear_velocity
y_val_offset = 0
if droneObject.currentAction == 0:
y_val_offset = self.step_length
elif droneObject.currentAction == 1:
y_val_offset = -self.step_length
vx_vec.append(self.constant_x_vel if droneObject.reseting == False else 0)
vy_vec.append(quad_vel.y_val + y_val_offset if droneObject.reseting == False else 0)
z_vec.append(self.constant_z_pos)
droneObject.currentStep += 1
Utils.getClient().simPause(False)
Utils.getClient().client.call_async('moveByVelocityZBatch', vx_vec, vy_vec, z_vec, self.actionTime, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(), droneNames).join()
Utils.getClient().simPause(True)
def randomPoseInArena(self):
width = 1600 // 100
min = -(width // 2)
max = (width // 2)
return random.uniform(min, max)
def resetBatch(self):
windows = False
# Size difference: -7710.0, -6070.0
Utils.getClient().simPause(False)
Utils.getClient().reset()
time.sleep(5) if windows else time.sleep(0.25)
randomArenas = np.random.randint(len(self.areans), size=len(self.droneObjects))
for i in range(len(self.droneObjects)):
self.droneObjects[i].currentArena = randomArenas[i]
# airsim.Quaternionr(0.0, 0.0, 1.0, 0.0) = 180 degrees
poses = [airsim.Pose(airsim.Vector3r(self.areans[droneObject.currentArena][0][0],
self.areans[droneObject.currentArena][0][1] + self.randomPoseInArena(),
self.areans[droneObject.currentArena][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)) for droneObject in self.droneObjects]
Utils.getClient().client.call('simSetVehiclePoseBatch', poses, [droneObject.droneName for droneObject in self.droneObjects])
time.sleep(5) if windows else time.sleep(0.25)
for droneObject in self.droneObjects:
Utils.getClient().armDisarm(True, droneObject.droneName)
Utils.getClient().enableApiControl(True, droneObject.droneName)
Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName)
if windows: time.sleep(1)
# Move up 3m
time.sleep(5) if windows else time.sleep(0.25)
for droneObject in self.droneObjects:
quad_position = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position
#Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName).join()
#Utils.getClient().hoverAsync(vehicle_name=droneObject.droneName).join()
Utils.getClient().moveToPositionAsync(quad_position.x_val, quad_position.y_val, self.constant_z_pos, 3.0, vehicle_name=droneObject.droneName)
droneObject.currentStep = 0
currentPos_x_AS = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position.to_numpy_array()[0]
droneObject.distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
droneObject.reseting = False
droneObject.currentTotalReward = 0
if windows: time.sleep(1)
#time.sleep(5)
self.gatherAllObservations()
time.sleep(5) if windows else time.sleep(0.25)
Utils.getClient().simPause(True)
self.episodes += 1
def calculateReward(self, droneObject : DroneObj):
image = droneObject.currentState['image']
currentPos_x_AS = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position.to_numpy_array()[0]
distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
collisionInfo = Utils.getClient().simGetCollisionInfo(droneObject.droneName)
hasCollided = collisionInfo.has_collided or image.min() < 0.55
if droneObject.currentStep < 2:
hasCollided = False
done = 0
reward_States = {
"Collided": 0,
"Won": 0,
"approaching_collision": 0,
"constant_reward" : 0,
"max_actions" : 0,
"goal_distance" : 0,
}
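        # Reward shaping: a constant +3 per surviving step (logged under "goal_distance"), a
        # large penalty on collision, a smaller penalty for exceeding 400 steps, and episode
        # termination within 5 m of the goal (the explicit "Won" bonus is commented out).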
reward_States["goal_distance"] = 3.0
if hasCollided:
done = 1
reward_States["Collided"] = -100
elif distanceFromGoal <= 5:
done = 1
#reward_States["Won"] = 100
elif droneObject.currentStep > 400:
done = 1
reward_States["max_actions"] = -10
reward = sum(reward_States.values())
droneObject.distanceFromGoal = distanceFromGoal
droneObject.currentTotalReward += reward
return reward, done
def resetStep(self, droneObject : DroneObj):
if droneObject.reseting == True:
if droneObject.resetTick == 0 and time.perf_counter() - droneObject.resetingTime > 1:
print("RESETING DRONE ", droneObject.droneId, print("len "), len(self.droneObjects))
randomArena = np.random.randint(len(self.areans), size=(1,))[0]
droneObject.currentArena = randomArena
Utils.getClient().client.call_async("resetVehicle", droneObject.droneName, airsim.Pose(airsim.Vector3r(self.areans[droneObject.currentArena][0][0],
self.areans[droneObject.currentArena][0][1] + self.randomPoseInArena(),
self.areans[droneObject.currentArena][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)))
droneObject.resetTick = 1
droneObject.resetingTime = time.perf_counter()
if droneObject.resetTick == 1 and time.perf_counter() - droneObject.resetingTime > 1:
Utils.getClient().armDisarm(True, droneObject.droneName)
Utils.getClient().enableApiControl(True, droneObject.droneName)
Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName)
droneObject.resetingTime = droneObject.resetingTime
droneObject.resetTick = 3
if droneObject.resetTick == 3 and time.perf_counter() - droneObject.resetingTime > 2:
droneObject.reseting = False
droneObject.resetTick = 0
state = Utils.getClient().getMultirotorState(droneObject.droneName)
quad_position = state.kinematics_estimated.position
Utils.getClient().moveToPositionAsync(quad_position.x_val, quad_position.y_val, self.constant_z_pos, 3.0, vehicle_name=droneObject.droneName)
currentPos_x_AS = state.kinematics_estimated.position.to_numpy_array()[0]
droneObject.distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
droneObject.currentStep = 0
droneObject.currentTotalReward = 0
self.episodes += 1
def tick(self, agent):
for droneObject in self.droneObjects:
if droneObject.currentStatePos[0] < 5:
droneObject.reseting = True
self.resetStep(droneObject)
if droneObject.reseting == False:
maxAction, _ = agent.choose_action(droneObject.currentState)
droneObject.currentAction = maxAction
self.doActionBatch()
self.gatherAllObservations()
loadDQNFile = False
for droneObject in self.droneObjects:
if droneObject.reseting == False:
self.numImagesSent += 1
reward, done = self.calculateReward(droneObject)
Utils.getModelServer().call_async("pushMemory",
Utils.convertStateDicToListDic(droneObject.previousState),
int(droneObject.currentAction), #was considered np.int rather than int.
Utils.convertStateDicToListDic(droneObject.currentState),
reward,
1 - int(done))
if done:
Utils.getModelServer().call_async("finishEpisode", droneObject.distanceFromGoal, droneObject.currentTotalReward)
droneObject.reseting = True
droneObject.resetingTime = time.perf_counter()
agent.epsilon = Utils.getModelServer().call("getEpsilon")
agent.memory.pushCounter = Utils.getModelServer().call("getMemoryPushCounter")
loadDQNFile = True
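        # Both the trainer and this reader rename dqn.pth to dqn_read.pth while touching it,
        # which appears to act as a crude cross-process lock so neither side loads a
        # half-written checkpoint; any race that slips through is swallowed by the try/except below.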
if loadDQNFile and exists('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve())):
try:
os.rename('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
agent.load('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
os.rename('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
except:
print("issue reading file")
print("NumImagesSent: ", self.numImagesSent)
finished = True
for droneObject in self.droneObjects:
if droneObject.reseting == False:
finished = False
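        # NOTE: the unconditional override below makes tick() always report "not finished",
        # which appears intentional so the outer loop keeps the simulation running even once
        # every drone is mid-reset.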
finished = False
return finished
#libUE4Editor-AirSim.so!_ZNSt3__110__function6__funcIZN3rpc6detail10dispatcher4bindIZN3msr6airlib22MultirotorRpcLibServerC1EPNS7_11ApiProviderENS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEtE4$_14EEvRKSG_T_RKNS3_4tags14nonvoid_resultERKNSL_11nonzero_argEEUlRKN14clmdep_msgpack2v26objectEE_NSE_ISX_EEFNS_10unique_ptrINSS_2v113object_handleENS_14default_deleteIS11_EEEESW_EEclESW_() | 15,043 | Python | 46.457413 | 392 | 0.605797 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DroneObj.py | import time
import numpy as np
class DroneObject(object):
def __init__(self, droneId):
self.droneId = droneId
self.droneName = 'Drone{}'.format(droneId)
self.currentArena = None
self.currentStep = 0
self.droneSpawnOffset = np.array([0, 0 * droneId, 0])
self.previous_depth_image = None
self.currentState = None
self.currentStatePos = None # Used to create the value heat map
self.previousState = None
self.currentAction = None
self.currentTotalReward = 0
self.distanceFromGoal = None
self.reseting = True
self.reseting_API = False
self.reseting_API_2 = False
self.resetTick = 0
self.resetingTime = time.perf_counter()
def getCurrentArena(self):
return -1 if self.currentArena == None else self.currentArena | 864 | Python | 28.827585 | 71 | 0.630787 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/ReplayMemory.py | import random
from collections import namedtuple, deque
#state_image, state_velocity, action, next_state_image, next_state_velocity, reward, not_done
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'not_done'))
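# Example of a stored transition (sketch): the state fields are the {'image', 'velocity'} dicts
# produced by the simulation side, converted to plain lists for RPC transport, e.g.
#   Transition(state=prev_state, action=1, next_state=curr_state, reward=-100, not_done=0)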
class ReplayMemory(object):
def __init__(self, maxSize : int):
self.maxSize = maxSize
self.pushCounter = 0
self.memory = deque([], maxlen=self.maxSize)
def push(self, *args):
"""Save transition"""
self.memory.append(Transition(*args))
self.pushCounter += 1
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory) | 705 | Python | 31.090908 | 94 | 0.648227 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/setup.py | """
| File: setup.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: File that defines the installation requirements for this python package.
"""
import os
import toml
from setuptools import setup
# Obtain the extension data from the extension.toml file
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the extension.toml file
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
# generic
"numpy",
"pymavlink",
"scipy",
"pyyaml",
]
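# The package is normally installed in editable mode from its own folder, e.g. something like
# `pip install -e extensions/pegasus.simulator` (the exact path is an assumption).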
# Installation operation
setup(
name="pegasus-simulator",
author="Marcelo Jacinto",
maintainer="Marcelo Jacinto",
maintainer_email="[email protected]",
url=EXTENSION_TOML_DATA["package"]["repository"],
version=EXTENSION_TOML_DATA["package"]["version"],
description=EXTENSION_TOML_DATA["package"]["description"],
keywords=EXTENSION_TOML_DATA["package"]["keywords"],
license="BSD-3-Clause",
include_package_data=True,
    python_requires=">=3.7",
install_requires=INSTALL_REQUIRES,
packages=["pegasus.simulator"],
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7"],
zip_safe=False,
) | 1,391 | Python | 31.372092 | 89 | 0.711718 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/extension.py | """
| File: extension.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Implements the Pegasus_SimulatorExtension which omni.ext.IExt that is created when this class is enabled. In turn, this class initializes the extension widget.
"""
__all__ = ["Pegasus_SimulatorExtension"]
# Python garbage collection and asynchronous API
import gc
import asyncio
from functools import partial
from threading import Timer
# Omniverse general API
import pxr
import carb
import omni.ext
import omni.usd
import omni.kit.ui
import omni.kit.app
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport
# Pegasus Extension Files and API
from pegasus.simulator.params import MENU_PATH, WINDOW_TITLE
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Setting up the UI for the extension's Widget
from pegasus.simulator.ui.ui_window import WidgetWindow
from pegasus.simulator.ui.ui_delegate import UIDelegate
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Pegasus_SimulatorExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
carb.log_info("Pegasus Simulator is starting up")
# Save the extension id
self._ext_id = ext_id
# Create the UI of the app and its manager
self.ui_delegate = None
self.ui_window = None
# Start the extension backend
self._pegasus_sim = PegasusInterface()
# Check if we already have a stage loaded (when using autoload feature, it might not be ready yet)
# This is a limitation of the simulator, and we are doing this to make sure that the
        # extension does not crash when using the GUI with autoload feature
# If autoload was not enabled, and we are enabling the extension from the Extension widget, then
        # we will always have a stage open, and the auxiliary timer will never run
if omni.usd.get_context().get_stage_state() != omni.usd.StageState.CLOSED:
self._pegasus_sim.initialize_world()
else:
# We need to create a timer to check until the window is properly open and the stage created. This is a limitation
# of the current Isaac Sim simulator and the way it loads extensions :(
self.autoload_helper()
# Add the ability to show the window if the system requires it (QuickLayout feature)
ui.Workspace.set_show_window_fn(WINDOW_TITLE, partial(self.show_window, None))
# Add the extension to the editor menu inside isaac sim
editor_menu = omni.kit.ui.get_editor_menu()
if editor_menu:
self._menu = editor_menu.add_item(MENU_PATH, self.show_window, toggle=True, value=True)
# Show the window (It call the self.show_window)
ui.Workspace.show_window(WINDOW_TITLE, show=True)
def autoload_helper(self):
# Check if we already have a viewport and a camera of interest
if get_active_viewport() != None and type(get_active_viewport().stage) == pxr.Usd.Stage and str(get_active_viewport().stage.GetPrimAtPath("/OmniverseKit_Persp")) != "invalid null prim":
self._pegasus_sim.initialize_world()
else:
Timer(0.1, self.autoload_helper).start()
def show_window(self, menu, show):
"""
Method that controls whether a widget window is created or not
"""
if show == True:
# Create a window and its delegate
self.ui_delegate = UIDelegate()
self.ui_window = WidgetWindow(self.ui_delegate)
self.ui_window.set_visibility_changed_fn(self._visibility_changed_fn)
# If we have a window and we are not supposed to show it, then change its visibility
elif self.ui_window:
self.ui_window.visible = False
def _visibility_changed_fn(self, visible):
"""
This method is invoked when the user pressed the "X" to close the extension window
"""
# Update the Isaac sim menu visibility
self._set_menu(visible)
if not visible:
# Destroy the window, because we create a new one in the show window method
asyncio.ensure_future(self._destroy_window_async())
def _set_menu(self, visible):
"""
Method that updates the isaac sim ui menu to create the Widget window on and off
"""
editor_menu = omni.kit.ui.get_editor_menu()
if editor_menu:
editor_menu.set_value(MENU_PATH, visible)
async def _destroy_window_async(self):
# Wait one frame before it gets destructed (from NVidia example)
await omni.kit.app.get_app().next_update_async()
# Destroy the window UI if it exists
if self.ui_window:
self.ui_window.destroy()
self.ui_window = None
def on_shutdown(self):
"""
Callback called when the extension is shutdown
"""
carb.log_info("Pegasus Isaac extension shutdown")
# Destroy the isaac sim menu object
self._menu = None
# Destroy the window
if self.ui_window:
self.ui_window.destroy()
self.ui_window = None
# Destroy the UI delegate
if self.ui_delegate:
self.ui_delegate = None
        # De-register the function that shows the window from the isaac sim ui
ui.Workspace.set_show_window_fn(WINDOW_TITLE, None)
editor_menu = omni.kit.ui.get_editor_menu()
if editor_menu:
editor_menu.remove_item(MENU_PATH)
# Call the garbage collector
gc.collect()
| 6,081 | Python | 37.493671 | 193 | 0.666996 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
__author__ = "Marcelo Jacinto"
__email__ = "[email protected]"
from .extension import Pegasus_SimulatorExtension | 285 | Python | 30.777774 | 82 | 0.740351 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/params.py | """
| File: params.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: File that defines the base configurations for the Pegasus Simulator.
"""
import os
from pathlib import Path
import omni.isaac.core.utils.nucleus as nucleus
# Extension configuration
EXTENSION_NAME = "Pegasus Simulator"
WINDOW_TITLE = "Pegasus Simulator"
MENU_PATH = "Window/" + WINDOW_TITLE
DOC_LINK = "https://docs.omniverse.nvidia.com"
EXTENSION_OVERVIEW = "This extension shows how to incorporate drones into Isaac Sim"
# Get the current directory of where this extension is located
EXTENSION_FOLDER_PATH = Path(os.path.dirname(os.path.realpath(__file__)))
ROOT = str(EXTENSION_FOLDER_PATH.parent.parent.parent.resolve())
# Get the configurations file path
CONFIG_FILE = ROOT + "/pegasus.simulator/config/configs.yaml"
# Define the Extension Assets Path
ASSET_PATH = ROOT + "/pegasus.simulator/pegasus/simulator/assets"
ROBOTS_ASSETS = ASSET_PATH + "/Robots"
# Define the built in robots of the extension
ROBOTS = {"Iris": ROBOTS_ASSETS + "/Iris/iris.usd"} #, "Flying Cube": ROBOTS_ASSETS + "/iris_cube.usda"}
# Setup the default simulation environments path
NVIDIA_ASSETS_PATH = str(nucleus.get_assets_root_path())
ISAAC_SIM_ENVIRONMENTS = "/Isaac/Environments"
NVIDIA_SIMULATION_ENVIRONMENTS = {
"Default Environment": "Grid/default_environment.usd",
"Black Gridroom": "Grid/gridroom_black.usd",
"Curved Gridroom": "Grid/gridroom_curved.usd",
"Hospital": "Hospital/hospital.usd",
"Office": "Office/office.usd",
"Simple Room": "Simple_Room/simple_room.usd",
"Warehouse": "Simple_Warehouse/warehouse.usd",
"Warehouse with Forklifts": "Simple_Warehouse/warehouse_with_forklifts.usd",
"Warehouse with Shelves": "Simple_Warehouse/warehouse_multiple_shelves.usd",
"Full Warehouse": "Simple_Warehouse/full_warehouse.usd",
"Flat Plane": "Terrains/flat_plane.usd",
"Rough Plane": "Terrains/rough_plane.usd",
"Slope Plane": "Terrains/slope.usd",
"Stairs Plane": "Terrains/stairs.usd",
}
OMNIVERSE_ENVIRONMENTS = {
"Exhibition Hall": "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"
}
SIMULATION_ENVIRONMENTS = {}
# Add the Isaac Sim assets to the list
for asset in NVIDIA_SIMULATION_ENVIRONMENTS:
SIMULATION_ENVIRONMENTS[asset] = (
NVIDIA_ASSETS_PATH + ISAAC_SIM_ENVIRONMENTS + "/" + NVIDIA_SIMULATION_ENVIRONMENTS[asset]
)
# Add the omniverse assets to the list
for asset in OMNIVERSE_ENVIRONMENTS:
SIMULATION_ENVIRONMENTS[asset] = OMNIVERSE_ENVIRONMENTS[asset]
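# Example: after this assembly SIMULATION_ENVIRONMENTS["Default Environment"] resolves to the full
# Nucleus path of Grid/default_environment.usd, which is the form load_environment_async() consumes
# elsewhere in the extension.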
# Define the default settings for the simulation environment
DEFAULT_WORLD_SETTINGS = {"physics_dt": 1.0 / 250.0, "stage_units_in_meters": 1.0, "rendering_dt": 1.0 / 60.0}
# Define where the thumbnail of the vehicle is located
THUMBNAIL = ROBOTS_ASSETS + "/Iris/iris_thumbnail.png"
# Define where the thumbnail of the world is located
WORLD_THUMBNAIL = ASSET_PATH + "/Worlds/Empty_thumbnail.png"
| 3,070 | Python | 38.883116 | 111 | 0.739414 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/dynamics_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.dynamics import LinearDrag
class DynamicsParser(Parser):
def __init__(self):
# Dictionary of available sensors to instantiate
self.dynamics = {"linear_drag": LinearDrag}
def parse(self, data_type: str, data_dict):
# Get the class of the sensor
dynamics_cls = self.dynamics[data_type]
# Create an instance of that sensor
return dynamics_cls(data_dict)
| 635 | Python | 25.499999 | 56 | 0.699213 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
class Parser:
def __init__(self):
pass
def parse(self, data_type: str, data_dict):
pass
| 218 | Python | 15.846153 | 47 | 0.62844 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/thrusters_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
class ThrustersParser(Parser):
def __init__(self):
# Dictionary of available thrust curves to instantiate
self.thrust_curves = {"quadratic_thrust_curve": QuadraticThrustCurve}
def parse(self, data_type: str, data_dict):
# Get the class of the sensor
thrust_curve_cls = self.thrust_curves[data_type]
# Create an instance of that sensor
return thrust_curve_cls(data_dict)
| 692 | Python | 27.874999 | 77 | 0.715318 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/vehicle_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import carb
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser, SensorParser, ThrustersParser, DynamicsParser, BackendsParser
from pegasus.simulator.logic.vehicles import MultirotorConfig
class VehicleParser(Parser):
def __init__(self):
# Initialize the Parser object
super().__init__()
# Initialize Parsers for the sensors, dynamics and backends for control and communications
self.sensor_parser = SensorParser()
self.thrusters_parser = ThrustersParser()
self.dynamics_parser = DynamicsParser()
self.backends_parser = BackendsParser()
def parse(self, data_type: str, data_dict={}):
# Get the USD model associated with the vehicle
usd_model = data_dict.get("usd_model", "")
# Get the model thumbnail of the vehicle
thumbnail = data_dict.get("thumbnail", "")
# ---------------------------------------
# Generate the sensors for the multirotor
# ---------------------------------------
sensors = []
sensors_config = data_dict.get("sensors", {})
for sensor_name in sensors_config:
sensor = self.sensor_parser.parse(sensor_name, sensors_config[sensor_name])
if sensor is not None:
sensors.append(sensor)
# -----------------------------------------
# Generate the thrusters for the multirotor
# -----------------------------------------
thrusters = None
thrusters_config = data_dict.get("thrusters", {})
# Note: if a dictionary/yaml file contains more than one thrust curve configuration,
# only the last one will be kept
for thrust_curve_name in thrusters_config:
curve = self.thrusters_parser.parse(thrust_curve_name, thrusters_config[thrust_curve_name])
if curve is not None:
thrusters = curve
# ----------------------------------------
# Generate the dynamics for the multirotor
# ----------------------------------------
dynamics = None
dynamics_config = data_dict.get("drag", {})
for dynamics_name in dynamics_config:
carb.log_warn(dynamics_config[dynamics_name])
dynamic = self.dynamics_parser.parse(dynamics_name, dynamics_config[dynamics_name])
if dynamic is not None:
dynamics = dynamic
# ----------------------------------------
# Generate the backends for the multirotor
# ----------------------------------------
backends = []
backends_config = data_dict.get("backends", {})
for backends_name in backends_config:
backend = self.backends_parser.parse(backends_name, backends_config[backends_name])
if backend is not None:
backends.append(backend)
# Create a Multirotor config from the parsed data
multirotor_configuration = MultirotorConfig()
multirotor_configuration.usd_file = usd_model
multirotor_configuration.thrust_curve = thrusters
multirotor_configuration.drag = dynamics
multirotor_configuration.sensors = sensors
multirotor_configuration.backends = backends
return multirotor_configuration
| 3,406 | Python | 37.715909 | 106 | 0.57751 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/__init__.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .parser import Parser
from .sensor_parser import SensorParser
from .thrusters_parser import ThrustersParser
from .dynamics_parser import DynamicsParser
from .backends_parser import BackendsParser
from .graphs_parser import GraphParser
| 343 | Python | 30.272725 | 45 | 0.819242 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/sensor_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.sensors import Barometer, GPS, IMU, Magnetometer, Vision, Camera, Lidar
class SensorParser(Parser):
def __init__(self):
# Dictionary of available sensors to instantiate
self.sensors = {
"barometer": Barometer,
"gps": GPS,
"imu": IMU,
"magnetometer": Magnetometer,
"vision": Vision,
"camera": Camera,
"lidar": Lidar
}
def parse(self, data_type: str, data_dict):
# Get the class of the sensor
sensor_cls = self.sensors[data_type]
# Create an instance of that sensor
return sensor_cls(data_dict)
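    # Usage sketch (the config-dict keys are assumptions defined by each sensor class):
    #   imu = SensorParser().parse("imu", {"update_rate": 250.0})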
| 869 | Python | 26.187499 | 100 | 0.617952 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/backends_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Sensors that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.backends import MavlinkBackendConfig, MavlinkBackend, ROS2Backend
class BackendsParser(Parser):
# TODO - improve the structure of the backends in order to clean this parser
def __init__(self):
# Dictionary of available sensors to instantiate
self.backends = {"mavlink": MavlinkBackendConfig, "ros2": ROS2Backend}
def parse(self, data_type: str, data_dict):
# Get the class of the sensor
backends_cls = self.backends[data_type]
if backends_cls == MavlinkBackendConfig:
return MavlinkBackend(backends_cls(data_dict))
# Create an instance of that sensor
return backends_cls(data_dict)
| 892 | Python | 29.793102 | 94 | 0.709641 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/parser/graphs_parser.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Graphs that can be used with the vehicles
from pegasus.simulator.parser import Parser
from pegasus.simulator.logic.graphs import ROS2Camera, ROS2Tf, ROS2Odometry, ROS2Lidar
class GraphParser(Parser):
def __init__(self):
# Dictionary of available graphs to instantiate
self.graphs = {
"ROS2 Camera": ROS2Camera,
"ROS2 Tf": ROS2Tf,
"ROS2 Odometry": ROS2Odometry,
"ROS2 Lidar": ROS2Lidar
}
def parse(self, data_type: str, data_dict):
# Get the class of the graph
graph_cls = self.graphs[data_type]
# Create an instance of that graph
return graph_cls(data_dict) | 781 | Python | 26.92857 | 86 | 0.647887 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/ui/ui_window.py | """
| File: ui_window.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of WidgetWindow which contains all the UI code that defines the extension GUI
"""
__all__ = ["WidgetWindow"]
# External packages
import numpy as np
# Omniverse general API
import carb
import omni.ui as ui
from omni.ui import color as cl
from pegasus.simulator.ui.ui_delegate import UIDelegate
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS, THUMBNAIL, WORLD_THUMBNAIL, WINDOW_TITLE
class WidgetWindow(ui.Window):
# Design constants for the widgets
LABEL_PADDING = 120
BUTTON_HEIGHT = 50
GENERAL_SPACING = 5
WINDOW_WIDTH = 300
WINDOW_HEIGHT = 850
BUTTON_SELECTED_STYLE = {
"Button": {
"background_color": 0xFF5555AA,
"border_color": 0xFF5555AA,
"border_width": 2,
"border_radius": 5,
"padding": 5,
}
}
BUTTON_BASE_STYLE = {
"Button": {
"background_color": cl("#292929"),
"border_color": cl("#292929"),
"border_width": 2,
"border_radius": 5,
"padding": 5,
}
}
def __init__(self, delegate: UIDelegate, **kwargs):
"""
Constructor for the Window UI widget of the extension. Receives as input a UIDelegate that implements
all the callbacks to handle button clicks, drop-down menu actions, etc. (abstracting the interface between
the logic of the code and the ui)
"""
# Setup the base widget window
super().__init__(
WINDOW_TITLE, width=WidgetWindow.WINDOW_WIDTH, height=WidgetWindow.WINDOW_HEIGHT, visible=True, **kwargs
)
self.deferred_dock_in("Property", ui.DockPolicy.CURRENT_WINDOW_IS_ACTIVE)
# Setup the delegate that will bridge between the logic and the UI
self._delegate = delegate
# Bind the UI delegate to this window
self._delegate.set_window_bind(self)
        # Auxiliary attributes for getting the transforms of the vehicle and the camera from the UI
self._camera_transform_models = []
self._vehicle_transform_models = []
# Build the actual window UI
self._build_window()
def destroy(self):
# Clear the world and the stage correctly
self._delegate.on_clear_scene()
# It will destroy all the children
super().destroy()
def _build_window(self):
# Define the UI of the widget window
with self.frame:
# Vertical Stack of menus
with ui.VStack():
# Create a frame for selecting which scene to load
self._scene_selection_frame()
ui.Spacer(height=5)
# Create a frame for selecting which vehicle to load in the simulation environment
self._robot_selection_frame()
ui.Spacer(height=5)
                # Create a frame for selecting the camera position, and what it should point towards
self._viewport_camera_frame()
ui.Spacer()
def _scene_selection_frame(self):
"""
        Method that implements a dropdown menu with the list of available simulation environments for the vehicle
"""
# Frame for selecting the simulation environment to load
with ui.CollapsableFrame("Scene Selection"):
with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
ui.Spacer(height=WidgetWindow.GENERAL_SPACING)
# Iterate over all existing pre-made worlds bundled with this extension
with ui.HStack():
ui.Label("World Assets", width=WidgetWindow.LABEL_PADDING, height=10.0)
# Combo box with the available environments to select from
dropdown_menu = ui.ComboBox(0, height=10, name="environments")
for environment in SIMULATION_ENVIRONMENTS:
dropdown_menu.model.append_child_item(None, ui.SimpleStringModel(environment))
# Allow the delegate to know which option was selected in the dropdown menu
self._delegate.set_scene_dropdown(dropdown_menu.model)
ui.Spacer(height=0)
# UI to configure the default latitude, longitude and altitude coordinates
with ui.CollapsableFrame("Geographic Coordinates", collapsed=False):
with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
with ui.HStack():
# Latitude
ui.Label("Latitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
latitude_field = ui.FloatField(name="latitude", precision=6)
latitude_field.model.set_value(self._delegate._latitude)
self._delegate.set_latitude_field(latitude_field.model)
ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)
# Longitude
ui.Label("Longitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
longitude_field = ui.FloatField(name="longitude", precision=6)
longitude_field.model.set_value(self._delegate._longitude)
self._delegate.set_longitude_field(longitude_field.model)
ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)
# Altitude
ui.Label("Altitude", name="label", width=WidgetWindow.LABEL_PADDING-50)
altitude_field = ui.FloatField(name="altitude", precision=6)
altitude_field.model.set_value(self._delegate._altitude)
self._delegate.set_altitude_field(altitude_field.model)
ui.Circle(name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)
with ui.HStack():
ui.Button("Set", enabled=True, clicked_fn=self._delegate.on_set_new_global_coordinates)
ui.Button("Reset", enabled=True, clicked_fn=self._delegate.on_reset_global_coordinates)
ui.Button("Make Default", enabled=True, clicked_fn=self._delegate.on_set_new_default_global_coordinates)
ui.Spacer(height=0)
with ui.HStack():
# Add a thumbnail image to have a preview of the world that is about to be loaded
with ui.ZStack(width=WidgetWindow.LABEL_PADDING, height=WidgetWindow.BUTTON_HEIGHT * 2):
ui.Rectangle()
ui.Image(
WORLD_THUMBNAIL,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
alignment=ui.Alignment.LEFT_CENTER,
)
ui.Spacer(width=WidgetWindow.GENERAL_SPACING)
with ui.VStack():
# Button for loading a desired scene
ui.Button(
"Load Scene",
height=WidgetWindow.BUTTON_HEIGHT,
clicked_fn=self._delegate.on_load_scene,
style=WidgetWindow.BUTTON_BASE_STYLE,
)
# Button to reset the stage
ui.Button(
"Clear Scene",
height=WidgetWindow.BUTTON_HEIGHT,
clicked_fn=self._delegate.on_clear_scene,
style=WidgetWindow.BUTTON_BASE_STYLE,
)
def _robot_selection_frame(self):
"""
        Method that implements a frame that allows the user to choose which robot is about to be spawned
"""
# Auxiliary function to handle the "switch behaviour" of the buttons that are used to choose between a px4 or ROS2 backend
def handle_px4_ros_switch(self, px4_button, ros2_button, button):
            # Handle the UI of both buttons switching off and on (to make it prettier)
if button == "px4":
px4_button.enabled = False
ros2_button.enabled = True
px4_button.set_style(WidgetWindow.BUTTON_SELECTED_STYLE)
ros2_button.set_style(WidgetWindow.BUTTON_BASE_STYLE)
else:
px4_button.enabled = True
ros2_button.enabled = False
ros2_button.set_style(WidgetWindow.BUTTON_SELECTED_STYLE)
px4_button.set_style(WidgetWindow.BUTTON_BASE_STYLE)
# Handle the logic of switching between the two operating modes
self._delegate.set_streaming_backend(button)
# --------------------------
# Function UI starts here
# --------------------------
# Frame for selecting the vehicle to load
with ui.CollapsableFrame(title="Vehicle Selection"):
with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
ui.Spacer(height=WidgetWindow.GENERAL_SPACING)
# Iterate over all existing robots in the extension
with ui.HStack():
ui.Label("Vehicle Model", name="label", width=WidgetWindow.LABEL_PADDING)
# Combo box with the available vehicles to select from
dropdown_menu = ui.ComboBox(0, height=10, name="robots")
for robot in ROBOTS:
dropdown_menu.model.append_child_item(None, ui.SimpleStringModel(robot))
self._delegate.set_vehicle_dropdown(dropdown_menu.model)
with ui.HStack():
ui.Label("Vehicle ID", name="label", width=WidgetWindow.LABEL_PADDING)
vehicle_id_field = ui.IntField()
self._delegate.set_vehicle_id_field(vehicle_id_field.model)
# Add a frame transform to select the position of where to place the selected robot in the world
self._transform_frame()
ui.Label("Streaming Backend")
with ui.HStack():
# Add a thumbnail image to have a preview of the world that is about to be loaded
with ui.ZStack(width=WidgetWindow.LABEL_PADDING, height=WidgetWindow.BUTTON_HEIGHT * 2):
ui.Rectangle()
ui.Image(
THUMBNAIL, fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, alignment=ui.Alignment.LEFT_CENTER
)
ui.Spacer(width=WidgetWindow.GENERAL_SPACING)
with ui.VStack():
# Buttons that behave like switches to choose which network interface to use to simulate the control of the vehicle
px4_button = ui.Button(
"PX4",
height=WidgetWindow.BUTTON_HEIGHT * 2,
style=WidgetWindow.BUTTON_SELECTED_STYLE,
enabled=False,
)
ros2_button = ui.Button(
"ROS 2",
height=WidgetWindow.BUTTON_HEIGHT,
style=WidgetWindow.BUTTON_BASE_STYLE,
enabled=True,
visible=False
)
# Set the auxiliary function to handle the switch between both backends
px4_button.set_clicked_fn(lambda: handle_px4_ros_switch(self, px4_button, ros2_button, "px4"))
ros2_button.set_clicked_fn(lambda: handle_px4_ros_switch(self, px4_button, ros2_button, "ros"))
# UI to configure the PX4 settings
with ui.CollapsableFrame("PX4 Configurations", collapsed=False):
with ui.VStack(height=0, spacing=10, name="frame_v_stack"):
with ui.HStack():
ui.Label("Auto-launch PX4", name="label", width=WidgetWindow.LABEL_PADDING - 20)
px4_checkbox = ui.CheckBox()
px4_checkbox.model.set_value(self._delegate._autostart_px4)
self._delegate.set_px4_autostart_checkbox(px4_checkbox.model)
with ui.HStack():
ui.Label("PX4 Path", name="label", width=WidgetWindow.LABEL_PADDING - 20)
px4_path_field = ui.StringField(name="px4_path", width=300)
px4_path_field.model.set_value(self._delegate._px4_dir)
self._delegate.set_px4_directory_field(px4_path_field.model)
ui.Button("Reset", enabled=True, clicked_fn=self._delegate.on_reset_px4_path)
ui.Button("Make Default", enabled=True, clicked_fn=self._delegate.on_set_new_default_px4_path)
with ui.HStack():
ui.Label("PX4 airframe", name="label", width=WidgetWindow.LABEL_PADDING - 20)
px4_airframe_field = ui.StringField(name="px4_model")
px4_airframe_field.model.set_value(self._delegate._px4_airframe)
self._delegate.set_px4_airframe_field(px4_airframe_field.model)
# Button to load the drone
ui.Button(
"Load Vehicle",
height=WidgetWindow.BUTTON_HEIGHT,
clicked_fn=self._delegate.on_load_vehicle,
style=WidgetWindow.BUTTON_BASE_STYLE,
)
def _viewport_camera_frame(self):
"""
        Method that implements a frame that allows the user to easily set the viewport camera pose
"""
all_axis = ["X", "Y", "Z"]
colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
default_values = [5.0, 5.0, 5.0]
target_default_values = [0.0, 0.0, 0.0]
# Frame for setting the camera to visualize the vehicle in the simulator viewport
with ui.CollapsableFrame("Viewport Camera"):
with ui.VStack(spacing=8):
ui.Spacer(height=0)
# Iterate over the position and rotation menus
with ui.HStack():
with ui.HStack():
ui.Label("Position", name="transform", width=50, height=20)
ui.Spacer()
# Fields X, Y and Z
for axis, default_value in zip(all_axis, default_values):
with ui.HStack():
with ui.ZStack(width=15):
ui.Rectangle(
width=15,
height=20,
style={
"background_color": colors[axis],
"border_radius": 3,
"corner_flag": ui.CornerFlag.LEFT,
},
)
ui.Label(axis, height=20, name="transform_label", alignment=ui.Alignment.CENTER)
float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
float_drag.model.set_value(default_value)
# Save the model of each FloatDrag such that we can access its values later on
self._camera_transform_models.append(float_drag.model)
ui.Circle(
name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED
)
# Iterate over the position and rotation menus
with ui.HStack():
with ui.HStack():
ui.Label("Target", name="transform", width=50, height=20)
ui.Spacer()
# Fields X, Y and Z
for axis, default_value in zip(all_axis, target_default_values):
with ui.HStack():
with ui.ZStack(width=15):
ui.Rectangle(
width=15,
height=20,
style={
"background_color": colors[axis],
"border_radius": 3,
"corner_flag": ui.CornerFlag.LEFT,
},
)
ui.Label(axis, height=20, name="transform_label", alignment=ui.Alignment.CENTER)
float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
float_drag.model.set_value(default_value)
# Save the model of each FloatDrag such that we can access its values later on
self._camera_transform_models.append(float_drag.model)
ui.Circle(
name="transform", width=20, height=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED
)
# Button to set the camera view
ui.Button(
"Set Camera Pose",
height=WidgetWindow.BUTTON_HEIGHT,
clicked_fn=self._delegate.on_set_viewport_camera,
style=WidgetWindow.BUTTON_BASE_STYLE,
)
ui.Spacer()
def _transform_frame(self):
"""
Method that implements a transform frame to translate and rotate an object that is about to be spawned
"""
components = ["Position", "Rotation"]
all_axis = ["X", "Y", "Z"]
colors = {"X": 0xFF5555AA, "Y": 0xFF76A371, "Z": 0xFFA07D4F}
default_values = [0.0, 0.0, 0.1]
with ui.CollapsableFrame("Position and Orientation"):
with ui.VStack(spacing=8):
ui.Spacer(height=0)
# Iterate over the position and rotation menus
for component in components:
with ui.HStack():
with ui.HStack():
ui.Label(component, name="transform", width=50)
ui.Spacer()
# Fields X, Y and Z
for axis, default_value in zip(all_axis, default_values):
with ui.HStack():
with ui.ZStack(width=15):
ui.Rectangle(
width=15,
height=20,
style={
"background_color": colors[axis],
"border_radius": 3,
"corner_flag": ui.CornerFlag.LEFT,
},
)
ui.Label(axis, name="transform_label", alignment=ui.Alignment.CENTER)
if component == "Position":
float_drag = ui.FloatDrag(name="transform", min=-1000000, max=1000000, step=0.01)
float_drag.model.set_value(default_value)
else:
float_drag = ui.FloatDrag(name="transform", min=-180.0, max=180.0, step=0.01)
# Save the model of each FloatDrag such that we can access its values later on
self._vehicle_transform_models.append(float_drag.model)
ui.Circle(name="transform", width=20, radius=3.5, size_policy=ui.CircleSizePolicy.FIXED)
ui.Spacer(height=0)
# ------------------------------------------------------------------------------------------------
# TODO - optimize the reading of values from the transform widget. This could be one function only
# ------------------------------------------------------------------------------------------------
def get_selected_vehicle_attitude(self):
# Extract the vehicle desired position and orientation for spawning
if len(self._vehicle_transform_models) == 6:
vehicle_pos = np.array([self._vehicle_transform_models[i].get_value_as_float() for i in range(3)])
            vehicle_orientation = np.array(
                [self._vehicle_transform_models[i].get_value_as_float() for i in range(3, 6)]
            )
            return vehicle_pos, vehicle_orientation
return None, None
def get_selected_camera_pos(self):
"""
Method that returns the currently selected camera position in the camera transform widget
"""
# Extract the camera desired position and the target it is pointing to
if len(self._camera_transform_models) == 6:
camera_pos = np.array([self._camera_transform_models[i].get_value_as_float() for i in range(3)])
camera_target = np.array([self._camera_transform_models[i].get_value_as_float() for i in range(3, 6)])
return camera_pos, camera_target
return None, None
| 22,183 | Python | 47.756044 | 139 | 0.511428 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/ui/ui_delegate.py | """
| File: ui_delegate.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the UiDelegate which is an abstraction layer between the extension UI and code logic features
"""
# External packages
import os
import asyncio
from scipy.spatial.transform import Rotation
# Omniverse extensions
import carb
import omni.ui as ui
# Extension Configurations
from pegasus.simulator.params import ROBOTS, SIMULATION_ENVIRONMENTS
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
# Vehicle Manager to spawn Vehicles
from pegasus.simulator.logic.backends import MavlinkBackend, MavlinkBackendConfig #, ROS2Backend
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
from pegasus.simulator.logic.vehicle_manager import VehicleManager
class UIDelegate:
"""
Object that will interface between the logic/dynamic simulation part of the extension and the Widget UI
"""
def __init__(self):
# The window that will be bound to this delegate
self._window = None
# Get an instance of the pegasus simulator
self._pegasus_sim: PegasusInterface = PegasusInterface()
# Attribute that holds the currently selected scene from the dropdown menu
self._scene_dropdown: ui.AbstractItemModel = None
self._scene_names = list(SIMULATION_ENVIRONMENTS.keys())
# Selected latitude, longitude and altitude
self._latitude_field: ui.AbstractValueModel = None
self._latitude = PegasusInterface().latitude
self._longitude_field: ui.AbstractValueModel = None
self._longitude = PegasusInterface().longitude
self._altitude_field: ui.AbstractValueModel = None
self._altitude = PegasusInterface().altitude
        # Attribute that holds the currently selected vehicle from the dropdown menu
self._vehicle_dropdown: ui.AbstractItemModel = None
self._vehicles_names = list(ROBOTS.keys())
# Get an instance of the vehicle manager
self._vehicle_manager = VehicleManager()
# Selected option for broadcasting the simulated vehicle (PX4+ROS2 or just ROS2)
# By default we assume PX4
self._streaming_backend: str = "px4"
        # Selected value for the id of the vehicle
self._vehicle_id_field: ui.AbstractValueModel = None
self._vehicle_id: int = 0
# Attribute that will save the model for the px4-autostart checkbox
self._px4_autostart_checkbox: ui.AbstractValueModel = None
self._autostart_px4: bool = True
        # Attributes to store the path for the PX4 directory
self._px4_directory_field: ui.AbstractValueModel = None
self._px4_dir: str = PegasusInterface().px4_path
        # Attributes to store the PX4 airframe
self._px4_airframe_field: ui.AbstractValueModel = None
self._px4_airframe: str = 'iris'
def set_window_bind(self, window):
self._window = window
def set_scene_dropdown(self, scene_dropdown_model: ui.AbstractItemModel):
self._scene_dropdown = scene_dropdown_model
def set_latitude_field(self, latitude_model: ui.AbstractValueModel):
self._latitude_field = latitude_model
def set_longitude_field(self, longitude_model: ui.AbstractValueModel):
self._longitude_field = longitude_model
def set_altitude_field(self, altitude_model: ui.AbstractValueModel):
self._altitude_field = altitude_model
def set_vehicle_dropdown(self, vehicle_dropdown_model: ui.AbstractItemModel):
self._vehicle_dropdown = vehicle_dropdown_model
def set_vehicle_id_field(self, vehicle_id_field: ui.AbstractValueModel):
self._vehicle_id_field = vehicle_id_field
def set_streaming_backend(self, backend: str = "px4"):
carb.log_info("Chosen option: " + backend)
self._streaming_backend = backend
def set_px4_autostart_checkbox(self, checkbox_model:ui.AbstractValueModel):
self._px4_autostart_checkbox = checkbox_model
def set_px4_directory_field(self, directory_field_model: ui.AbstractValueModel):
self._px4_directory_field = directory_field_model
def set_px4_airframe_field(self, airframe_field_model: ui.AbstractValueModel):
self._px4_airframe_field = airframe_field_model
"""
---------------------------------------------------------------------
Callbacks to handle user interaction with the extension widget window
---------------------------------------------------------------------
"""
def on_load_scene(self):
"""
Method that should be invoked when the button to load the selected world is pressed
"""
# Check if a scene is selected in the drop-down menu
if self._scene_dropdown is not None:
# Get the id of the selected environment from the list
            environment_index = self._scene_dropdown.get_item_value_model().as_int
            # Get the name of the selected world
            selected_world = self._scene_names[environment_index]
# Try to spawn the selected world
asyncio.ensure_future(self._pegasus_sim.load_environment_async(SIMULATION_ENVIRONMENTS[selected_world], force_clear=True))
def on_set_new_global_coordinates(self):
"""
Method that gets invoked to set new global coordinates for this simulation
"""
self._pegasus_sim.set_global_coordinates(
self._latitude_field.get_value_as_float(),
self._longitude_field.get_value_as_float(),
self._altitude_field.get_value_as_float())
def on_reset_global_coordinates(self):
"""
Method that gets invoked to set the global coordinates to the defaults saved in the extension configuration file
"""
self._pegasus_sim.set_default_global_coordinates()
self._latitude_field.set_value(self._pegasus_sim.latitude)
self._longitude_field.set_value(self._pegasus_sim.longitude)
self._altitude_field.set_value(self._pegasus_sim.altitude)
def on_set_new_default_global_coordinates(self):
"""
        Method that gets invoked to set new default global coordinates for this simulation. This will attempt
to save the current coordinates as new defaults for the extension itself
"""
self._pegasus_sim.set_new_default_global_coordinates(
self._latitude_field.get_value_as_float(),
self._longitude_field.get_value_as_float(),
self._altitude_field.get_value_as_float()
)
def on_clear_scene(self):
"""
Method that should be invoked when the clear world button is pressed
"""
self._pegasus_sim.clear_scene()
def on_load_vehicle(self):
"""
Method that should be invoked when the button to load the selected vehicle is pressed
"""
async def async_load_vehicle():
# Check if we already have a physics environment activated. If not, then activate it
# and only after spawn the vehicle. This is to avoid trying to spawn a vehicle without a physics
# environment setup. This way we can even spawn a vehicle in an empty world and it won't care
if hasattr(self._pegasus_sim.world, "_physics_context") == False:
await self._pegasus_sim.world.initialize_simulation_context_async()
# Check if a vehicle is selected in the drop-down menu
if self._vehicle_dropdown is not None and self._window is not None:
# Get the id of the selected vehicle from the list
vehicle_index = self._vehicle_dropdown.get_item_value_model().as_int
# Get the name of the selected vehicle
selected_robot = self._vehicles_names[vehicle_index]
# Get the id of the selected vehicle
self._vehicle_id = self._vehicle_id_field.get_value_as_int()
# Get the desired position and orientation of the vehicle from the UI transform
pos, euler_angles = self._window.get_selected_vehicle_attitude()
# Read if we should auto-start px4 from the checkbox
px4_autostart = self._px4_autostart_checkbox.get_value_as_bool()
# Read the PX4 path from the field
px4_path = os.path.expanduser(self._px4_directory_field.get_value_as_string())
# Read the PX4 airframe from the field
px4_airframe = self._px4_airframe_field.get_value_as_string()
# Create the multirotor configuration
mavlink_config = MavlinkBackendConfig({
"vehicle_id": self._vehicle_id,
"px4_autolaunch": px4_autostart,
"px4_dir": px4_path,
"px4_vehicle_model": px4_airframe
})
config_multirotor = MultirotorConfig()
config_multirotor.backends = [MavlinkBackend(mavlink_config)]
#ros2 = ROS2Backend(self._vehicle_id)
# Try to spawn the selected robot in the world to the specified namespace
Multirotor(
"/World/quadrotor",
ROBOTS[selected_robot],
self._vehicle_id,
pos,
Rotation.from_euler("XYZ", euler_angles, degrees=True).as_quat(),
config=config_multirotor,
)
# Log that a vehicle of the type multirotor was spawned in the world via the extension UI
carb.log_info("Spawned the robot: " + selected_robot + " using the Pegasus Simulator UI")
else:
# Log that it was not possible to spawn the vehicle in the world using the Pegasus Simulator UI
carb.log_error("Could not spawn the robot using the Pegasus Simulator UI")
# Run the actual vehicle spawn async so that the UI does not freeze
asyncio.ensure_future(async_load_vehicle())
def on_set_viewport_camera(self):
"""
Method that should be invoked when the button to set the viewport camera pose is pressed
"""
carb.log_warn("The viewport camera pose has been adjusted")
if self._window:
# Get the current camera position value
camera_position, camera_target = self._window.get_selected_camera_pos()
if camera_position is not None and camera_target is not None:
# Set the camera view to a fixed value
self._pegasus_sim.set_viewport_camera(eye=camera_position, target=camera_target)
def on_set_new_default_px4_path(self):
"""
Method that will try to update the new PX4 autopilot path with whatever is passed on the string field
"""
carb.log_warn("A new default PX4 Path will be set for the extension.")
# Read the current path from the field
path = self._px4_directory_field.get_value_as_string()
# Set the path using the pegasus interface
self._pegasus_sim.set_px4_path(path)
def on_reset_px4_path(self):
"""
Method that will reset the string field to the default PX4 path
"""
carb.log_warn("Reseting the path to the default one")
self._px4_directory_field.set_value(self._pegasus_sim.px4_path)
| 11,564 | Python | 41.208029 | 134 | 0.642771 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/ui/__init__.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .ui_delegate import UIDelegate
from .ui_window import WidgetWindow
| 175 | Python | 24.142854 | 39 | 0.777143 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/vehicle_manager.py | """
| File: vehicle_manager.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the VehicleManager class - a singleton used to manage the vehicles that are spawned in the simulation world
"""
__all__ = ["VehicleManager"]
import carb
from threading import Lock
class VehicleManager:
"""The VehicleManager class is implemented following a singleton pattern. This means that once a vehicle is spawned
on the world or an instance of the VehicleManager is created, no either will be running at the same time.
This class keeps track of all the vehicles that are spawned in the simulation world, either trough the extension UI
or via Python script. Every time a new vehicle object is created, the 'add_vehicle' method is invoked. Additionally,
a vehicle is removed, i.e. 'remove_vehicle' gets invoked, every time the '__del__' function of the "Vehicle" object
gets invoked.
"""
# The object instance of the Vehicle Manager
_instance = None
_is_initialized = False
# A dictionary of vehicles that are spawned in the simulator
_vehicles = {}
# Lock for safe multi-threading
_lock: Lock = Lock()
def __init__(self):
"""
Constructor for the vehicle manager class.
"""
pass
"""
Properties
"""
@property
def vehicles(self):
"""
Returns:
(list) List of vehicles that were spawned.
"""
return VehicleManager._vehicles
"""
Operations
"""
@staticmethod
def get_vehicle_manager():
"""
Method that returns the current vehicle manager.
"""
return VehicleManager()
def add_vehicle(self, stage_prefix: str, vehicle):
"""
Method that adds the vehicles to the vehicle manager.
Args:
stage_prefix (str): A string with the name that the vehicle is spawned in the simulator
vehicle (Vehicle): The vehicle object being added to the vehicle manager.
"""
VehicleManager._vehicles[stage_prefix] = vehicle
def get_vehicle(self, stage_prefix: str):
"""Method that returns the vehicle object given its stage prefix. Returns None if there is no vehicle
associated with that stage prefix
Args:
stage_prefix (str): A string with the name that the vehicle is spawned in the simulator
Returns:
Vehicle: The vehicle object associated with the stage_prefix
"""
return VehicleManager._vehicles.get(stage_prefix, None)
def remove_vehicle(self, stage_prefix: str):
"""
Method that deletes a vehicle from the vehicle manager.
Args:
stage_prefix (str): A string with the name that the vehicle is spawned in the simulator.
"""
try:
VehicleManager._vehicles.pop(stage_prefix)
        except KeyError:
            pass
def remove_all_vehicles(self):
"""
Method that will delete all the vehicles that were spawned from the vehicle manager.
"""
VehicleManager._vehicles.clear()
def __new__(cls):
"""Method that allocated memory for a new vehicle_manager. Since the VehicleManager follows a singleton pattern,
only one instance of VehicleManger object can be in memory at any time.
Returns:
VehicleManger: the single instance of the VehicleManager class.
"""
# Use a lock in here to make sure we do not have a race condition
# when using multi-threading and creating the first instance of the VehicleManager
with cls._lock:
if cls._instance is None:
cls._instance = object.__new__(cls)
else:
carb.log_info("Vehicle Manager is defined already, returning the previously defined one")
return VehicleManager._instance
def __del__(self):
"""Destructor for the object"""
VehicleManager._instance = None
return
| 4,124 | Python | 31.738095 | 135 | 0.640883 |
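A minimal usage sketch of the singleton behaviour described above (not part of the original file; the plain object below is a hypothetical stand-in for a real Vehicle, which normally registers itself on construction):
from pegasus.simulator.logic.vehicle_manager import VehicleManager
# Both access paths return the exact same instance because of the singleton __new__
manager_a = VehicleManager.get_vehicle_manager()
manager_b = VehicleManager()
assert manager_a is manager_b
# Hypothetical stage prefix; a real Vehicle adds itself to the manager when it is spawned
manager_a.add_vehicle("/World/quadrotor", object())
assert manager_a.get_vehicle("/World/quadrotor") is not None
manager_a.remove_vehicle("/World/quadrotor")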
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/state.py | """
| File: state.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Describes the state of a vehicle (or rigidbody).
"""
__all__ = ["State"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.rotations import rot_ENU_to_NED, rot_FLU_to_FRD
class State:
"""
Stores the state of a given vehicle.
Note:
- position - A numpy array with the [x,y,z] of the vehicle expressed in the inertial frame according to an ENU convention.
        - attitude - A numpy array with the quaternion [qx, qy, qz, qw] that encodes the attitude of the vehicle's FLU body frame, relative to an ENU inertial frame, expressed in the ENU inertial frame.
- linear_velocity - A numpy array with [vx,vy,vz] that defines the velocity of the vehicle expressed in the inertial frame according to an ENU convention.
- linear_body_velocity - A numpy array with [u,v,w] that defines the velocity of the vehicle expressed in the FLU body frame.
- angular_velocity - A numpy array with [p,q,r] with the angular velocity of the vehicle's FLU body frame, relative to an ENU inertial frame, expressed in the FLU body frame.
        - linear_acceleration - An array with [x_ddot, y_ddot, z_ddot] with the acceleration of the vehicle expressed in the inertial frame according to an ENU convention.
"""
def __init__(self):
"""
Initialize the State object
"""
# The position [x,y,z] of the vehicle's body frame relative to the inertial frame, expressed in the inertial frame
self.position = np.array([0.0, 0.0, 0.0])
# The attitude (orientation) of the vehicle's body frame relative to the inertial frame of reference,
# expressed in the inertial frame. This quaternion should follow the convention [qx, qy, qz, qw], such that "no rotation"
# equates to the quaternion=[0, 0, 0, 1]
self.attitude = np.array([0.0, 0.0, 0.0, 1.0])
# The linear velocity [u,v,w] of the vehicle's body frame expressed in the body frame of reference
self.linear_body_velocity = np.array([0.0, 0.0, 0.0])
# The linear velocity [x_dot, y_dot, z_dot] of the vehicle's body frame expressed in the inertial frame of reference
self.linear_velocity = np.array([0.0, 0.0, 0.0])
# The angular velocity [wx, wy, wz] of the vehicle's body frame relative to the inertial frame, expressed in the body frame
self.angular_velocity = np.array([0.0, 0.0, 0.0])
# The linear acceleration [ax, ay, az] of the vehicle's body frame relative to the inertial frame, expressed in the inertial frame
self.linear_acceleration = np.array([0.0, 0.0, 0.0])
def get_position_ned(self):
"""
Method that, assuming that a state is encoded in ENU standard (the Isaac Sim standard), converts the position
to the NED convention used by PX4 and other onboard flight controllers
Returns:
np.ndarray: A numpy array with the [x,y,z] of the vehicle expressed in the inertial frame according to an NED convention.
"""
return rot_ENU_to_NED.apply(self.position)
def get_attitude_ned_frd(self):
"""
Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
attitude of the vehicle it to the NED-FRD convention used by PX4 and other onboard flight controllers
Returns:
np.ndarray: A numpy array with the quaternion [qx, qy, qz, qw] that encodes the attitude of the vehicle's FRD body frame, relative to an NED inertial frame, expressed in the NED inertial frame.
"""
attitude_frd_ned = rot_ENU_to_NED * Rotation.from_quat(self.attitude) * rot_FLU_to_FRD
return attitude_frd_ned.as_quat()
def get_linear_body_velocity_ned_frd(self):
"""
Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
linear body velocity of the vehicle it to the NED-FRD convention used by PX4 and other onboard flight controllers
Returns:
np.ndarray: A numpy array with [u,v,w] that defines the velocity of the vehicle expressed in the FRD body frame.
"""
        # Get the linear velocity of the body frame expressed in the FLU convention
        linear_vel_body_flu = Rotation.from_quat(self.attitude).inv().apply(self.linear_velocity)
        # Convert the linear velocity in the body frame expressed in FLU convention to the FRD convention
        return rot_FLU_to_FRD.apply(linear_vel_body_flu)
def get_linear_velocity_ned(self):
"""
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
linear velocity expressed in the inertial frame to the NED convention used by PX4 and other onboard flight
controllers
Returns:
np.ndarray: A numpy array with [vx,vy,vz] that defines the velocity of the vehicle expressed in the inertial frame according to a NED convention.
"""
return rot_ENU_to_NED.apply(self.linear_velocity)
def get_angular_velocity_frd(self):
"""
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
angular velocity expressed in the body frame to the NED-FRD convention used by PX4 and other onboard flight
controllers
Returns:
np.ndarray: A numpy array with [p,q,r] with the angular velocity of the vehicle's FRD body frame, relative to an NED inertial frame, expressed in the FRD body frame.
"""
return rot_FLU_to_FRD.apply(self.angular_velocity)
def get_linear_acceleration_ned(self):
"""
        Method that, assuming that a state is encoded in ENU-FLU standard (the Isaac Sim standard), converts the
linear acceleration expressed in the inertial frame to the NED convention used by PX4 and other onboard flight
controllers
Returns:
np.ndarray: An array with [x_ddot, y_ddot, z_ddot] with the acceleration of the vehicle expressed in the inertial frame according to an NED convention.
"""
return rot_ENU_to_NED.apply(self.linear_acceleration)
| 6,384 | Python | 52.208333 | 205 | 0.684211 |
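A short sketch (not from the original file) of how the conversions above are typically used: fill a State in the ENU/FLU convention used by Isaac Sim and read it back in the NED/FRD convention expected by PX4. The numeric values are illustrative only.
import numpy as np
from pegasus.simulator.logic.state import State
state = State()
state.position = np.array([1.0, 2.0, 3.0])         # ENU: 1 m East, 2 m North, 3 m Up
state.linear_velocity = np.array([0.5, 0.0, 0.0])   # moving East at 0.5 m/s
print(state.get_position_ned())                     # approximately [2, 1, -3] (North, East, Down)
print(state.get_linear_velocity_ned())              # approximately [0, 0.5, 0]
print(state.get_attitude_ned_frd())                 # the identity ENU/FLU attitude expressed as a NED/FRD quaternion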
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .interface.pegasus_interface import PegasusInterface | 212 | Python | 34.499994 | 82 | 0.768868 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/rotations.py | """
| File: rotations.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Implements utilitary rotations between ENU and NED inertial frame conventions and FLU and FRD body frame conventions.
"""
import numpy as np
from scipy.spatial.transform import Rotation
# Quaternion for rotation between ENU and NED INERTIAL frames
# NED to ENU: +PI/2 rotation about Z (Down) followed by a +PI rotation around X (old North/new East)
# ENU to NED: +PI/2 rotation about Z (Up) followed by a +PI rotation about X (old East/new North)
# This rotation is symmetric, so q_ENU_to_NED == q_NED_to_ENU.
# Note: this quaternion follows the convention [qx, qy, qz, qw]
q_ENU_to_NED = np.array([0.70711, 0.70711, 0.0, 0.0])
# A scipy rotation from the ENU inertial frame to the NED inertial frame of reference
rot_ENU_to_NED = Rotation.from_quat(q_ENU_to_NED)
# Quaternion for rotation between body FLU and body FRD frames
# +PI rotation around X (Forward) axis rotates from Forward, Right, Down (aircraft)
# to Forward, Left, Up (base_link) frames and vice-versa.
# This rotation is symmetric, so q_FLU_to_FRD == q_FRD_to_FLU.
# Note: this quaternion follows the convention [qx, qy, qz, qw]
q_FLU_to_FRD = np.array([1.0, 0.0, 0.0, 0.0])
# A scipy rotation from the FLU body frame to the FRD body frame
rot_FLU_to_FRD = Rotation.from_quat(q_FLU_to_FRD)
| 1,447 | Python | 48.931033 | 132 | 0.735314 |
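A quick sanity check of the conventions above (illustrative, not part of the file): the ENU basis vectors map onto the expected NED axes and the FLU left axis flips sign in FRD.
from pegasus.simulator.logic.rotations import rot_ENU_to_NED, rot_FLU_to_FRD
print(rot_ENU_to_NED.apply([1.0, 0.0, 0.0]))   # East  -> [0, 1, 0] (East is the NED y-axis)
print(rot_ENU_to_NED.apply([0.0, 1.0, 0.0]))   # North -> [1, 0, 0] (North is the NED x-axis)
print(rot_ENU_to_NED.apply([0.0, 0.0, 1.0]))   # Up    -> [0, 0, -1] (Down is positive in NED)
print(rot_FLU_to_FRD.apply([0.0, 1.0, 0.0]))   # Left  -> [0, -1, 0] (Right is positive in FRD)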
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .thrust_curve import ThrustCurve
from .quadratic_thrust_curve import QuadraticThrustCurve | 249 | Python | 34.714281 | 82 | 0.779116 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/quadratic_thrust_curve.py | """
| File: quadratic_thrust_curve.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements a quadratic thrust curve for rotors
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
import numpy as np
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.thrusters.thrust_curve import ThrustCurve
class QuadraticThrustCurve(ThrustCurve):
"""Class that implements the dynamics of rotors that can be described by a quadratic thrust curve
"""
def __init__(self, config={}):
"""_summary_
Args:
config (dict): A Dictionary that contains all the parameters for configuring the QuadraticThrustCurve - it can be empty or only have some of the parameters used by the QuadraticThrustCurve.
Examples:
The dictionary default parameters are
>>> {"num_rotors": 4,
>>> "rotor_constant": [5.84e-6, 5.84e-6, 5.84e-6, 5.84e-6],
>>> "rolling_moment_coefficient": [1e-6, 1e-6, 1e-6, 1e-6],
>>> "rot_dir": [-1, -1, 1, 1],
>>> "min_rotor_velocity": [0, 0, 0, 0], # rad/s
>>> "max_rotor_velocity": [1100, 1100, 1100, 1100], # rad/s
>>> }
"""
# Get the total number of rotors to simulate
self._num_rotors = config.get("num_rotors", 4)
# The rotor constant used for computing the total thrust produced by the rotor: T = rotor_constant * omega^2
self._rotor_constant = config.get("rotor_constant", [8.54858e-6, 8.54858e-6, 8.54858e-6, 8.54858e-6])
assert len(self._rotor_constant) == self._num_rotors
# The rotor constant used for computing the total torque generated about the vehicle Z-axis
self._rolling_moment_coefficient = config.get("rolling_moment_coefficient", [1e-6, 1e-6, 1e-6, 1e-6])
assert len(self._rolling_moment_coefficient) == self._num_rotors
# Save the rotor direction of rotation
self._rot_dir = config.get("rot_dir", [-1, -1, 1, 1])
assert len(self._rot_dir) == self._num_rotors
# Values for the minimum and maximum rotor velocity in rad/s
self.min_rotor_velocity = config.get("min_rotor_velocity", [0, 0, 0, 0])
assert len(self.min_rotor_velocity) == self._num_rotors
self.max_rotor_velocity = config.get("max_rotor_velocity", [1100, 1100, 1100, 1100])
assert len(self.max_rotor_velocity) == self._num_rotors
# The actual speed references to apply to the vehicle rotor joints
self._input_reference = [0.0 for i in range(self._num_rotors)]
# The actual velocity that each rotor is spinning at
self._velocity = [0.0 for i in range(self._num_rotors)]
# The actual force that each rotor is generating
self._force = [0.0 for i in range(self._num_rotors)]
# The actual rolling moment that is generated on the body frame of the vehicle
self._rolling_moment = 0.0
def set_input_reference(self, input_reference):
"""
Receives as input a list of target angular velocities of each rotor in rad/s
"""
# The target angular velocity of the rotor
self._input_reference = input_reference
def update(self, state: State, dt: float):
"""
Note: the state and dt variables are not used in this implementation, but left
to add support to other rotor models where the total thrust is dependent on
states such as vehicle linear velocity
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
"""
rolling_moment = 0.0
# Compute the actual force to apply to the rotors and the rolling moment contribution
for i in range(self._num_rotors):
            # Set the actual velocity that each rotor is spinning at (instantaneous model - no delay introduced)
# Only apply clipping of the input reference
self._velocity[i] = np.maximum(
self.min_rotor_velocity[i], np.minimum(self._input_reference[i], self.max_rotor_velocity[i])
)
# Set the force using a quadratic thrust curve
self._force[i] = self._rotor_constant[i] * np.power(self._velocity[i], 2)
# Compute the rolling moment coefficient
rolling_moment += self._rolling_moment_coefficient[i] * np.power(self._velocity[i], 2.0) * self._rot_dir[i]
# Update the rolling moment variable
self._rolling_moment = rolling_moment
# Return the forces and velocities on each rotor and total torque applied on the body frame
return self._force, self._velocity, self._rolling_moment
@property
def force(self):
"""The force to apply to each rotor of the vehicle at any given time instant
Returns:
list: A list of forces (in Newton N) to apply to each rotor of the vehicle (on its Z-axis) at any given time instant
"""
return self._force
@property
def velocity(self):
"""The velocity at which each rotor of the vehicle should be rotating at any given time instant
Returns:
list: A list of angular velocities (in rad/s) of each rotor (about its Z-axis) at any given time instant
"""
return self._velocity
@property
def rolling_moment(self):
"""The total rolling moment being generated on the body frame of the vehicle by the rotating propellers
Returns:
float: The total rolling moment to apply to the vehicle body frame (Torque about the Z-axis) in Nm
"""
return self._rolling_moment
@property
def rot_dir(self):
"""The direction of rotation of each rotor of the vehicle
Returns:
list(int): A list with the rotation direction of each rotor (-1 is counter-clockwise and 1 for clockwise)
"""
return self._rot_dir
| 6,097 | Python | 41.643356 | 201 | 0.632606 |
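A minimal sketch (not part of the original file) of driving the thrust curve outside the simulator. With the default rotor constant, a rotor spinning at 1000 rad/s should produce roughly 8.54858e-6 * 1000**2 ≈ 8.55 N of thrust, and the opposite-spinning rotor pairs cancel the rolling moment.
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.thrusters.quadratic_thrust_curve import QuadraticThrustCurve
thrust_curve = QuadraticThrustCurve()                                # default 4-rotor configuration
thrust_curve.set_input_reference([1000.0, 1000.0, 1000.0, 1000.0])   # commanded angular velocities (rad/s)
forces, velocities, rolling_moment = thrust_curve.update(State(), dt=0.004)
print(forces)           # approximately [8.55, 8.55, 8.55, 8.55] N
print(rolling_moment)   # approximately 0.0 Nm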
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/thrusters/thrust_curve.py | """
| File: thrust_curve.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the base interface for defining thrust curves for vehicles
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from pegasus.simulator.logic.state import State
class ThrustCurve:
"""Class that implements the dynamics of rotors that can be described by a quadratic thrust curve
"""
def __init__(self):
pass
def set_input_reference(self, input_reference):
"""
Receives as input a list of target angular velocities of each rotor in rad/s
"""
pass
def update(self, state: State, dt: float):
"""
Note: the state and dt variables are not used in this implementation, but left
to add support to other rotor models where the total thrust is dependent on
states such as vehicle linear velocity
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
"""
pass
@property
def force(self):
"""The force to apply to each rotor of the vehicle at any given time instant
Returns:
list: A list of forces (in Newton N) to apply to each rotor of the vehicle (on its Z-axis) at any given time instant
"""
pass
@property
def velocity(self):
"""The velocity at which each rotor of the vehicle should be rotating at any given time instant
Returns:
list: A list of angular velocities (in rad/s) of each rotor (about its Z-axis) at any given time instant
"""
pass
@property
def rolling_moment(self):
"""The total rolling moment being generated on the body frame of the vehicle by the rotating propellers
Returns:
float: The total rolling moment to apply to the vehicle body frame (Torque about the Z-axis) in Nm
"""
pass
@property
def rot_dir(self):
"""The direction of rotation of each rotor of the vehicle
Returns:
list(int): A list with the rotation direction of each rotor (-1 is counter-clockwise and 1 for clockwise)
"""
pass | 2,307 | Python | 32.941176 | 128 | 0.642826 |
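An illustrative sketch (an assumption, not code from this repository) of how a custom rotor model could be written against the interface above: a simple first-order lag of the rotor speed towards the commanded value followed by the quadratic thrust law. The read-only properties are omitted for brevity.
import numpy as np
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.thrusters.thrust_curve import ThrustCurve
class FirstOrderThrustCurve(ThrustCurve):
    def __init__(self, num_rotors=4, rotor_constant=8.54858e-6, time_constant=0.05):
        self._num_rotors = num_rotors
        self._rotor_constant = rotor_constant
        self._tau = time_constant
        self._reference = np.zeros(num_rotors)
        self._velocity = np.zeros(num_rotors)
        self._force = np.zeros(num_rotors)
    def set_input_reference(self, input_reference):
        self._reference = np.array(input_reference)
    def update(self, state: State, dt: float):
        # First-order lag of the rotor speed towards the reference, then the quadratic thrust law
        self._velocity = self._velocity + (self._reference - self._velocity) * dt / self._tau
        self._force = self._rotor_constant * self._velocity ** 2
        return self._force, self._velocity, 0.0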
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_camera.py | """
| File: ros2_camera.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Camera"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.prims import set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class ROS2Camera(Graph):
"""The class that implements the ROS2 Camera graph. This class inherits the base class Graph.
"""
def __init__(self, camera_prim_path: str, config: dict = {}):
"""Initialize the ROS2 Camera class
Args:
camera_prim_path (str): Path to the camera prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Camera - it can be empty or only have some of the parameters used by the ROS2Camera.
Examples:
The dictionary default parameters are
>>> {"graph_evaluator": "execution", # type of the omnigraph to create (execution, push)
>>> "resolution": [640, 480], # output video stream resolution in pixels [width, height]
>>> "types": ['rgb', 'camera_info'], # rgb, depth, depth_pcl, instance_segmentation, semantic_segmentation, bbox_2d_tight, bbox_2d_loose, bbox_3d, camera_info
>>> "publish_labels": True} # publish labels for instance_segmentation, semantic_segmentation, bbox_2d_tight, bbox_2d_loose and bbox_3d camera types
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Camera")
# Save camera path, frame id and ros topic name
self._camera_prim_path = camera_prim_path
self._frame_id = camera_prim_path.rpartition("/")[-1] # frame_id of the camera is the last prim path part after `/`
self._base_topic = ""
# Process the config dictionary
self._graph_evaluator = config.get("graph_evaluator", "execution")
self._resolution = config.get("resolution", [640, 480])
self._types = np.array(config.get("types", ['rgb', 'camera_info']))
self._publish_labels = config.get("publish_labels", True)
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph of the camera.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
self._base_topic = f"/{self._frame_id}"
# Set the prim_path for the camera
if self._camera_prim_path[0] != '/':
self._camera_prim_path = f"{vehicle.prim_path}/{self._camera_prim_path}"
        # Check that the camera prim exists
if not is_prim_path_valid(self._camera_prim_path):
carb.log_error(f"Cannot create ROS2 Camera graph, the camera prim path \"{self._camera_prim_path}\" is not valid")
return
# Set the prim paths for camera and tf graphs
graph_path = f"{self._camera_prim_path}_pub"
# Graph configuration
if self._graph_evaluator == "execution":
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
elif self._graph_evaluator == "push":
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
}
else:
carb.log_error(f"Cannot create ROS2 Camera graph, graph evaluator type \"{self._graph_evaluator}\" is not valid")
return
# Creating a graph edit configuration with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_tick", "omni.graph.action.OnTick"),
("create_viewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("get_render_product", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("set_viewport_resolution", "omni.isaac.core_nodes.IsaacSetViewportResolution"),
("set_camera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
],
keys.CONNECT: [
("on_tick.outputs:tick", "create_viewport.inputs:execIn"),
("create_viewport.outputs:execOut", "get_render_product.inputs:execIn"),
("create_viewport.outputs:viewport", "get_render_product.inputs:viewport"),
("create_viewport.outputs:execOut", "set_viewport_resolution.inputs:execIn"),
("create_viewport.outputs:viewport", "set_viewport_resolution.inputs:viewport"),
("set_viewport_resolution.outputs:execOut", "set_camera.inputs:execIn"),
("get_render_product.outputs:renderProductPath", "set_camera.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("create_viewport.inputs:viewportId", 0),
("create_viewport.inputs:name", f"{self._namespace}/{self._frame_id}"),
("set_viewport_resolution.inputs:width", self._resolution[0]),
("set_viewport_resolution.inputs:height", self._resolution[1]),
],
}
# Add camerasHelper for each selected camera type
valid_camera_type = False
for camera_type in self._types:
if not camera_type in ["rgb", "depth", "depth_pcl", "semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d", "camera_info"]:
continue
camera_helper_name = f"camera_helper_{camera_type}"
graph_config[keys.CREATE_NODES] += [
(camera_helper_name, "omni.isaac.ros2_bridge.ROS2CameraHelper")
]
graph_config[keys.CONNECT] += [
("set_camera.outputs:execOut", f"{camera_helper_name}.inputs:execIn"),
("get_render_product.outputs:renderProductPath", f"{camera_helper_name}.inputs:renderProductPath")
]
graph_config[keys.SET_VALUES] += [
(f"{camera_helper_name}.inputs:nodeNamespace", self._namespace),
(f"{camera_helper_name}.inputs:frameId", self._frame_id),
(f"{camera_helper_name}.inputs:topicName", f"{self._base_topic}/{camera_type}"),
(f"{camera_helper_name}.inputs:type", camera_type)
]
# Publish labels for specific camera types
if self._publish_labels and camera_type in ["semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d"]:
graph_config[keys.SET_VALUES] += [
(camera_helper_name + ".inputs:enableSemanticLabels", True),
(camera_helper_name + ".inputs:semanticLabelsTopicName", f"{self._frame_id}/{camera_type}_labels")
]
valid_camera_type = True
if not valid_camera_type:
carb.log_error(f"Cannot create ROS2 Camera graph, no valid camera type was selected")
return
# Create the camera graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Connect camera to the graphs
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/set_camera"),
attribute="inputs:cameraPrim",
target_prim_paths=[self._camera_prim_path]
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(graph)
# Also initialize the Super class with updated prim path (only camera graph path)
super().initialize(graph_path)
def camera_topic(self, camera_type: str) -> str:
"""
(str) Path to the camera topic.
Args:
camera_type (str): one of the supported camera output types
Returns:
Camera topic name (str) if the camera type exists, else empty string
"""
return f"{self._namespace}{self._base_topic}/{camera_type}" if camera_type in self._types else ""
def camera_labels_topic(self, camera_type: str) -> str:
"""
(str) Path to the camera labels topic.
Args:
camera_type (str): one of the supported camera output types
Returns:
Camera labels topic name (str) if the camera type exists, else empty string
"""
if not self._publish_labels or \
not camera_type in self._types or \
not camera_type in ["semantic_segmentation", "instance_segmentation", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d"]:
return ""
return f"{self._namespace}{self._base_topic}/{camera_type}_labels"
| 9,070 | Python | 44.813131 | 181 | 0.600551 |
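An illustrative configuration sketch (not part of the original file). The prim path "body/camera" is hypothetical, and attaching the graph to a vehicle is assumed to follow the same pattern as the backends (a MultirotorConfig that accepts a list of graphs); adjust to your setup.
from pegasus.simulator.logic.graphs import ROS2Camera
camera_graph = ROS2Camera(
    "body/camera",   # hypothetical prim path, resolved relative to the vehicle prim
    config={"types": ["rgb", "depth", "camera_info"], "resolution": [1280, 720]},
)
# After the simulator calls camera_graph.initialize(vehicle), the topics resolve to e.g.:
#   camera_graph.camera_topic("rgb") -> "/<vehicle_name>/camera/rgb"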
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/__init__.py | """
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .graph import Graph
from .ros2_camera import ROS2Camera
from .ros2_tf import ROS2Tf
from .ros2_odometry import ROS2Odometry
from .ros2_lidar import ROS2Lidar | 254 | Python | 27.33333 | 82 | 0.771654 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_odometry.py | """
| File: ros2_odometry.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Tf"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid, set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
class ROS2Odometry(Graph):
"""The class that implements the ROS2 Odometry graph. This class inherits the base class Graph.
"""
def __init__(self, config: dict = {}):
"""Initialize the ROS2 Odometry class
Args:
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Odometry - it can be empty or only have some of the parameters used by the ROS2Odometry.
Examples:
The dictionary default parameters are
>>> {"odom_topic": "odom", # String for odometry topic
>>> "publish_odom_to_base_tf": True, # Enable tf broadcaster for odom_frame->base_frame transform
>>> "publish_map_to_odom_tf": True, # Enable tf broadcaster for map_frame->odom_frame transform
>>> "map_frame": "map", # String name for the map_frame
>>> "odom_frame": "odom", # String name for the odom_frame
>>> "base_frame": "base_link"} # String name for the base_frame
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Odometry")
# Process the config dictionary
self._odom_topic = config.get("odom_topic", "odom")
        self._publish_odom_to_base_tf = config.get("publish_odom_to_base_tf", True)
self._publish_map_to_odom_tf = config.get("publish_map_to_odom_tf", True)
self._map_frame = config.get("map_frame", "map")
self._odom_frame = config.get("odom_frame", "odom")
self._base_frame = config.get("base_frame", "base_link")
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
# Create the graph under vehicle with graph name odom_pub and allow only one per vehicle.
graph_path = f"{vehicle.prim_path}/odom_pub"
if is_prim_path_valid(graph_path):
carb.log_warn(f"ROS2 Odometry Graph for vehicle {vehicle.vehicle_name} already exists")
return
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a graph edit configuration with transform tree publishers
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_playback_tick", "omni.graph.action.OnPlaybackTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("isaac_compute_odometry", "omni.isaac.core_nodes.IsaacComputeOdometry"),
("publish_odometry", "omni.isaac.ros2_bridge.ROS2PublishOdometry")
],
keys.CONNECT: [
("on_playback_tick.outputs:tick", "isaac_compute_odometry.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_odometry.inputs:timeStamp"),
("isaac_compute_odometry.outputs:execOut", "publish_odometry.inputs:execIn"),
("isaac_compute_odometry.outputs:linearVelocity", "publish_odometry.inputs:linearVelocity"),
("isaac_compute_odometry.outputs:orientation", "publish_odometry.inputs:orientation"),
("isaac_compute_odometry.outputs:position", "publish_odometry.inputs:position")
],
keys.SET_VALUES: [
("publish_odometry.inputs:odomFrameId", self._odom_frame),
("publish_odometry.inputs:chassisFrameId", self._base_frame),
("publish_odometry.inputs:nodeNamespace", self._namespace),
("publish_odometry.inputs:topicName", self._odom_topic)
]
}
# Create odom_frame->base_frame publisher
if self._publish_odom_to_base_tf:
graph_config[keys.CREATE_NODES] += [
("publish_odom_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree")
]
graph_config[keys.CONNECT] += [
("on_playback_tick.outputs:tick", "publish_odom_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_odom_transform_tree.inputs:timeStamp"),
("isaac_compute_odometry.outputs:orientation", "publish_odom_transform_tree.inputs:rotation"),
("isaac_compute_odometry.outputs:position", "publish_odom_transform_tree.inputs:translation")
]
graph_config[keys.SET_VALUES] += [
("publish_odom_transform_tree.inputs:parentFrameId", self._odom_frame),
("publish_odom_transform_tree.inputs:childFrameId", self._base_frame)
]
# Create map_frame->odom_frame publisher
# Because there is no drift or pose jumps in simulated odometry, map_frame->base_frame == odom_frame->base_frame
        if self._publish_map_to_odom_tf:
graph_config[keys.CREATE_NODES] += [
("publish_map_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree")
]
graph_config[keys.CONNECT] += [
("on_playback_tick.outputs:tick", "publish_map_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_map_transform_tree.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_map_transform_tree.inputs:parentFrameId", self._map_frame),
("publish_map_transform_tree.inputs:childFrameId", self._odom_frame)
]
        # Create the odometry graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Set the odometry chassis prim, which should be the vehicle prim path
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_compute_odometry"),
attribute="inputs:chassisPrim",
target_prim_paths=[vehicle.prim_path]
)
        # Run the ROS Odometry graph once to generate the ROS publishers in SDGPipeline
        og.Controller.evaluate_sync(graph)
        # Also initialize the Super class with updated prim path (only the odometry graph path)
super().initialize(graph_path)
@property
def odometry_topic(self) -> str:
"""
(str) Path to the odometry topic.
Returns:
Odometry topic name (str)
"""
return f"{self._namespace}/{self._odom_topic}"
| 7,131 | Python | 45.921052 | 185 | 0.603422 |
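An illustrative sketch (not part of the original file) of an odometry graph publishing on a custom topic and broadcasting only the odom->base_link transform:
from pegasus.simulator.logic.graphs import ROS2Odometry
odometry_graph = ROS2Odometry(config={
    "odom_topic": "odometry",
    "publish_odom_to_base_tf": True,
    "publish_map_to_odom_tf": False,
})
# Once initialize(vehicle) has run, odometry_graph.odometry_topic resolves to "/<vehicle_name>/odometry"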
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_tf.py | """
| File: ros2_tf.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Tf"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid, set_targets
from omni.isaac.core.prims import XFormPrim
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
class ROS2Tf(Graph):
"""The class that implements the ROS2 TF graph. This class inherits the base class Graph.
"""
def __init__(self):
"""Initialize the ROS2 TF class
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Tf")
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
# The vehicle uses body instead of standardized base_link,
# so we need to create the base_link and connect the body to it
base_link_xform_path = f"{vehicle.prim_path}/body/base_link"
XFormPrim(
prim_path=base_link_xform_path
)
# Create the graph under vehicle with graph name tf and allow only one per vehicle.
graph_path = f"{vehicle.prim_path}/tf_pub"
if is_prim_path_valid(graph_path):
carb.log_warn(f"ROS2 TF Graph for vehicle {vehicle.vehicle_name} already exists")
return
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a graph edit configuration with transform tree publishers
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_playback_tick", "omni.graph.action.OnPlaybackTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publish_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishTransformTree")
],
keys.CONNECT: [
("on_playback_tick.outputs:tick", "publish_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_transform_tree.inputs:timeStamp")
],
keys.SET_VALUES: [
("publish_transform_tree.inputs:nodeNamespace", self._namespace)
]
}
        # Create the tf graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Set the parent frame, it should be the base_link
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/publish_transform_tree"),
attribute="inputs:parentPrim",
target_prim_paths=[base_link_xform_path]
)
# Create list of target prims, which will contain articulation root
# and all sensors with frame_path filled
target_prim_paths = [vehicle.prim_path]
for sensor in vehicle._sensors:
if len(sensor.frame_path) and is_prim_path_valid(sensor.frame_path):
target_prim_paths.append(sensor.frame_path)
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/publish_transform_tree"),
attribute="inputs:targetPrims",
target_prim_paths=target_prim_paths
)
        # Run the ROS TF graph once to generate the ROS publishers in SDGPipeline
        og.Controller.evaluate_sync(graph)
        # Also initialize the Super class with updated prim path (only the tf graph path)
super().initialize(graph_path)
| 3,809 | Python | 35.634615 | 112 | 0.618273 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/graph.py | """
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
__all__ = ["Graph"]
class Graph:
"""The base class for implementing OmniGraphs
Attributes:
graph_prim_path
"""
def __init__(self, graph_type: str):
"""Initialize Graph class
Args:
graph_type (str): A name that describes the type of graph
"""
self._graph_type = graph_type
self._graph_prim_path = None
def initialize(self, graph_prim_path: str):
"""
Method that should be implemented and called by the class that inherits the graph object.
"""
self._graph_prim_path = graph_prim_path
@property
def graph_type(self) -> str:
"""
(str) A name that describes the type of graph.
"""
return self._graph_type
@property
def graph_prim_path(self) -> str:
"""
(str) Path to the graph.
"""
return self._graph_prim_path
| 996 | Python | 23.924999 | 97 | 0.565261 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_lidar.py | """
| File: ros2_lidar.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Lidar"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.prims import set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class ROS2Lidar(Graph):
"""The class that implements the ROS2 Lidar graph. This class inherits the base class Graph.
"""
def __init__(self, lidar_prim_path: str, config: dict = {}):
"""Initialize the ROS2 Lidar class
Args:
lidar_prim_path (str): Path to the lidar prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Lidar - it can be empty or only have some of the parameters used by the ROS2Lidar.
Examples:
The dictionary default parameters are
>>> {"publish_scan": False, # publish scanner data as sensor_msgs/LaserScan (requires high_lod turned off)
>>> "publish_point_cloud": True} # publish scanner data as sensor_msgs/PointCloud2 (for 2D data, requires high_lod turned on)
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Lidar")
# Save lidar path, frame id and ros topic name
self._lidar_prim_path = lidar_prim_path
self._frame_id = lidar_prim_path.rpartition("/")[-1] # frame_id of the lidar is the last prim path part after `/`
self._base_topic = ""
# Process the config dictionary
self._publish_scan = config.get("publish_scan", False)
self._publish_point_cloud = config.get("publish_point_cloud", True)
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph of the lidar.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
self._base_topic = f"/{self._frame_id}"
        # Set the prim_path for the lidar
if self._lidar_prim_path[0] != '/':
self._lidar_prim_path = f"{vehicle.prim_path}/{self._lidar_prim_path}"
# Check if the prim path is valid
if not is_prim_path_valid(self._lidar_prim_path):
carb.log_error(f"Cannot create ROS2 Lidar graph, the lidar prim path \"{self._lidar_prim_path}\" is not valid")
return
# Set the prim paths for camera and tf graphs
graph_path = f"{self._lidar_prim_path}_pub"
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a default graph edit configuration
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_tick", "omni.graph.action.OnTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
],
keys.CONNECT: [],
keys.SET_VALUES: [],
}
# Add laser scan publishing to the graph
if self._publish_scan:
graph_config[keys.CREATE_NODES] += [
("isaac_read_lidar_beams", "omni.isaac.range_sensor.IsaacReadLidarBeams"),
("publish_laser_scan", "omni.isaac.ros2_bridge.ROS2PublishLaserScan")
]
graph_config[keys.CONNECT] += [
("on_tick.outputs:tick", "isaac_read_lidar_beams.inputs:execIn"),
("isaac_read_lidar_beams.outputs:execOut", "publish_laser_scan.inputs:execIn"),
("isaac_read_lidar_beams.outputs:azimuthRange", "publish_laser_scan.inputs:azimuthRange"),
("isaac_read_lidar_beams.outputs:depthRange", "publish_laser_scan.inputs:depthRange"),
("isaac_read_lidar_beams.outputs:horizontalFov", "publish_laser_scan.inputs:horizontalFov"),
("isaac_read_lidar_beams.outputs:horizontalResolution", "publish_laser_scan.inputs:horizontalResolution"),
("isaac_read_lidar_beams.outputs:intensitiesData", "publish_laser_scan.inputs:intensitiesData"),
("isaac_read_lidar_beams.outputs:linearDepthData", "publish_laser_scan.inputs:linearDepthData"),
("isaac_read_lidar_beams.outputs:numCols", "publish_laser_scan.inputs:numCols"),
("isaac_read_lidar_beams.outputs:numRows", "publish_laser_scan.inputs:numRows"),
("isaac_read_lidar_beams.outputs:rotationRate", "publish_laser_scan.inputs:rotationRate"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_laser_scan.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_laser_scan.inputs:frameId", self._frame_id),
("publish_laser_scan.inputs:nodeNamespace", self._namespace),
("publish_laser_scan.inputs:topicName", f"{self._base_topic}/scan")
]
# Add point cloud publishing to the graph
if self._publish_point_cloud:
graph_config[keys.CREATE_NODES] += [
("isaac_read_lidar_point_cloud", "omni.isaac.range_sensor.IsaacReadLidarPointCloud"),
("publish_point_cloud", "omni.isaac.ros2_bridge.ROS2PublishPointCloud")
]
graph_config[keys.CONNECT] += [
("on_tick.outputs:tick", "isaac_read_lidar_point_cloud.inputs:execIn"),
("isaac_read_lidar_point_cloud.outputs:execOut", "publish_point_cloud.inputs:execIn"),
("isaac_read_lidar_point_cloud.outputs:pointCloudData", "publish_point_cloud.inputs:pointCloudData"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_point_cloud.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_point_cloud.inputs:frameId", self._frame_id),
("publish_point_cloud.inputs:nodeNamespace", self._namespace),
("publish_point_cloud.inputs:topicName", f"{self._base_topic}/point_cloud")
]
        # Create the lidar graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Connect lidar to the graphs
if self._publish_scan:
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_read_lidar_beams"),
attribute="inputs:lidarPrim",
target_prim_paths=[self._lidar_prim_path]
)
if self._publish_point_cloud:
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_read_lidar_point_cloud"),
attribute="inputs:lidarPrim",
target_prim_paths=[self._lidar_prim_path]
)
# Run the ROS Lidar graph once to generate ROS publishers in SDGPipeline
og.Controller.evaluate_sync(graph)
# Also initialize the Super class with updated prim path (only lidar graph path)
super().initialize(graph_path)
def laser_scan_topic(self) -> str:
"""
Returns:
(str) Lidar laser scan topic name if exists, else empty string
"""
return f"{self._namespace}{self._base_topic}/scan" if self._publish_scan else ""
    def point_cloud_topic(self) -> str:
"""
Returns:
(str) Lidar point cloud topic name if exists, else empty string
"""
return f"{self._namespace}{self._base_topic}/point_cloud" if self._publish_point_cloud else "" | 7,872 | Python | 45.863095 | 179 | 0.607342 |
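An illustrative sketch (not part of the original file) of a lidar graph. As the docstring above notes, the LaserScan output expects the lidar prim's high_lod to be off while the point cloud expects it on, so typically only one of the two publishers is enabled. The prim path is hypothetical.
from pegasus.simulator.logic.graphs import ROS2Lidar
lidar_graph = ROS2Lidar(
    "body/lidar",   # hypothetical prim path, resolved relative to the vehicle prim
    config={"publish_scan": True, "publish_point_cloud": False},
)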
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/camera.py | """
| File: camera.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
| Description: Creates or connects to a Camera prim for higher level functionality
"""
__all__ = ["Camera"]
import carb
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.sensor import Camera as CameraPrim
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class Camera(Sensor):
"""The class that implements the Camera sensor. This class inherits the base class Sensor.
"""
def __init__(self, camera_prim_path: str, config: dict = {}):
"""Initialize the Camera class
Args:
camera_prim_path (str): Path to the camera prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the Camera - it can be empty or only have some of the parameters used by the Camera.
Examples:
The dictionary default parameters are
>>> {"position": [0.0, 0.0, 0.0], # Meters
>>> "orientation": [0.0, 0.0, 0.0, 1.0], # Quaternion [qx, qy, qz, qw]
>>> "focal_length": 24.0, # Millimeters
>>> "focus_distance", 400.0, # Stage units
>>> "resolution": [640, 480], # Pixels
>>> "set_projection_type": "pinhole", # pinhole, fisheyeOrthographic, fisheyeEquidistant, fisheyeEquisolid, fisheyePolynomial, fisheyeSpherical
>>> "update_rate": 30.0, # Hz
>>> "overwrite_params": False} # Overwrite params if the camera prim already exists
"""
# Initialize the Super class "object" attribute
# update_rate not necessary
super().__init__(sensor_type="Camera", update_rate=config.get("update_rate", 30.0))
# Save the id of the sensor
self._camera_prim_path = camera_prim_path
self._frame_id = camera_prim_path.rpartition("/")[-1] # frame_id of the camera is the last prim path part after `/`
# Reference to the actual camera object. This is set when the camera is initialized
self.camera = None
# Get the position of the camera relative to the vehicle
self._position = np.array(config.get("position", [0.0, 0.0, 0.0]))
self._orientation = np.array(config.get("orientation", [0.0, 0.0, 0.0, 1.0])) # Quaternion [qx, qy, qz, qw]
# Get the camera parameters
self._focal_length = config.get("focal_length", 24.0)
self._focus_distance = config.get("focus_distance", 400.0)
self._clipping_range = config.get("clipping_range", [0.05, 1000000.0])
self._resolution = config.get("resolution", [640, 480])
self._set_projection_type = config.get("set_projection_type", "pinhole")
self._horizonal_aperture = config.get("horizontal_aperture", 20.9550)
self._vertical_aperture = config.get("vertical_aperture", 15.2908)
self._overwrite = config.get("overwrite_params", False)
# Save the current state of the camera sensor
self._state = {
"frame_id": self._frame_id
}
def initialize(self, vehicle: Vehicle):
"""Method that initializes the action graph of the camera. It also initalizes the sensor latitude, longitude and
altitude attributes as well as the vehicle that the sensor is attached to.
Args:
vehicle (Vehicle): The vehicle that this sensor is attached to.
"""
# Set the prim path for the camera
if self._camera_prim_path[0] != '/':
self._camera_prim_path = f"{vehicle.prim_path}/{self._camera_prim_path}"
# Create camera prim
if not is_prim_path_valid(self._camera_prim_path) or self._overwrite:
self.camera = CameraPrim(
prim_path=self._camera_prim_path,
frequency=self._update_rate,
resolution=self._resolution,
translation=np.array(self._position),
orientation=[self._orientation[3], self._orientation[0], self._orientation[1], self._orientation[2]]
)
# Set camera parameters
self.camera.set_focal_length(self._focal_length)
self.camera.set_focus_distance(self._focus_distance)
self.camera.set_clipping_range(self._clipping_range[0], self._clipping_range[1])
self.camera.set_projection_type(self._set_projection_type)
self.camera.set_horizontal_aperture(self._horizonal_aperture)
self.camera.set_vertical_aperture(self._vertical_aperture)
else:
self.camera = CameraPrim(
prim_path=self._camera_prim_path,
frequency=self._update_rate,
resolution=self._resolution
)
# Set the sensor's frame path
self.frame_path = self._camera_prim_path
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""
Args:
state (State): The current state of the vehicle. UNUSED IN THIS SENSOR
dt (float): The time elapsed between the previous and current function calls (s). UNUSED IN THIS SENSOR
Returns:
None
"""
return None | 5,720 | Python | 44.404762 | 173 | 0.61049 |
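An illustrative sketch (not part of the original file) of a forward-facing camera sensor; the prim path and mounting offset are hypothetical.
from pegasus.simulator.logic.sensors.camera import Camera
camera_sensor = Camera(
    "body/camera",   # hypothetical prim path, resolved relative to the vehicle prim
    config={"position": [0.10, 0.0, 0.0], "resolution": [1280, 720], "update_rate": 30.0},
)
# initialize(vehicle) is called by the vehicle once it is spawned; afterwards the underlying
# omni.isaac.sensor Camera object is available as camera_sensor.camera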
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/lidar.py | """
| File: lidar.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
| Description: Creates a lidar sensor
"""
__all__ = ["Lidar"]
from omni.usd import get_context
from omni.isaac.range_sensor import _range_sensor
import omni.isaac.RangeSensorSchema as RangeSensorSchema
from pxr import Sdf, Gf
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class Lidar(Sensor):
"""The class that implements the Lidar sensor. This class inherits the base class Sensor.
"""
def __init__(self, prim_path: str, config: dict = {}):
"""Initialize the Camera class
Args:
prim_path (str): Path to the lidar prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the lidar - it can be empty or only have some of the parameters used by the lidar.
Examples:
The dictionary default parameters are
>>> {"position": [0.0, 0.0, 0.0], # Meters
>>> "yaw_offset": 0.0, # Degrees
>>> "rotation_rate": 20.0, # Hz
>>> "horizontal_fov": 360.0, # Degrees
>>> "horizontal_resolution": 1.0, # Degrees
>>> "vertical_fov": 10.0, # Degrees
>>> "vertical_resolution": 1.0, # Degrees
>>> "min_range": 0.4, # Meters
>>> "max_range": 100.0, # Meters
>>> "high_lod": True, # High level of detail (True - draw all rays, False - draw horizontal rays)
>>> "draw_points": False, # Draw lidar points where they hit an object
>>> "draw_lines": False, # Draw lidar ray lines
>>> "fill_state: False} # Fill state with sensor data
"""
# Initialize the Super class "object" attribute
# update_rate not necessary
super().__init__(sensor_type="Lidar", update_rate=config.get("rotation_rate", 20.0))
# Save the id of the sensor
self._prim_path = prim_path
        self._frame_id = prim_path.rpartition("/")[-1] # frame_id of the lidar is the last prim path part after `/`
# The extension acquires the LIDAR interface at startup. It will be released during extension shutdown. We
# create a LIDAR prim using our schema, and then we interact with / query that prim using the python API found
# in lidar/bindings
self._li = _range_sensor.acquire_lidar_sensor_interface()
self.lidar = None
# Get the lidar position relative to its parent prim
self._position = np.array(config.get("position", [0.0, 0.0, 0.0]))
# Get the lidar parameters
self._yaw_offset = config.get("yaw_offset", 0.0)
self._rotation_rate = config.get("rotation_rate", 20.0)
self._horizontal_fov = config.get("horizontal_fov", 360.0)
self._horizontal_resolution = config.get("horizontal_resolution", 1.0)
self._vertical_fov = config.get("vertical_fov", 10.0)
self._vertical_resolution = config.get("vertical_resolution", 1.0)
self._min_range = config.get("min_range", 0.4)
self._max_range = config.get("max_range", 100.0)
self._high_lod = config.get("high_lod", True)
self._draw_points = config.get("draw_points", False)
self._draw_lines = config.get("draw_lines", False)
# Save the current state of the range sensor
self._fill_state = config.get("fill_state", False)
if self._fill_state:
self._state = {
"frame_id": self._frame_id,
"depth": None,
"zenith": None,
"azimuth": None
}
else:
self._state = None
def initialize(self, vehicle: Vehicle):
"""Method that initializes the lidar sensor. It also initalizes the sensor latitude, longitude and
altitude attributes as well as the vehicle that the sensor is attached to.
Args:
vehicle (Vehicle): The vehicle that this sensor is attached to.
"""
        # Set the prim path for the lidar
if self._prim_path[0] != '/':
self._prim_path = f"{vehicle.prim_path}/{self._prim_path}"
# create the LIDAR. Before we can set any attributes on our LIDAR, we must first create the prim using our
# LIDAR schema, and then populate it with the parameters we will be manipulating. If you try to manipulate
# a parameter before creating it, you will get a runtime error
stage = get_context().get_stage()
self.lidar = RangeSensorSchema.Lidar.Define(stage, Sdf.Path(self._prim_path))
# Set lidar parameters
self.lidar.AddTranslateOp().Set(Gf.Vec3f(*self._position))
self.lidar.CreateYawOffsetAttr().Set(self._yaw_offset)
self.lidar.CreateRotationRateAttr().Set(self._rotation_rate)
self.lidar.CreateHorizontalFovAttr().Set(self._horizontal_fov)
self.lidar.CreateHorizontalResolutionAttr().Set(self._horizontal_resolution)
self.lidar.CreateVerticalFovAttr().Set(self._vertical_fov)
self.lidar.CreateVerticalResolutionAttr().Set(self._vertical_resolution)
self.lidar.CreateMinRangeAttr().Set(self._min_range)
self.lidar.CreateMaxRangeAttr().Set(self._max_range)
self.lidar.CreateHighLodAttr().Set(self._high_lod)
self.lidar.CreateDrawPointsAttr().Set(self._draw_points)
self.lidar.CreateDrawLinesAttr().Set(self._draw_lines)
# Set the sensor's frame path
self.frame_path = self._prim_path
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor) or None
"""
# Add the values to the dictionary and return it
if self._fill_state:
self._state = {
"frame_id": self._frame_id,
"depth": self._li.get_depth_data(self._prim_path),
"zenith": self._li.get_zenith_data(self._prim_path),
"azimuth": self._li.get_azimuth_data(self._prim_path),
}
return self._state
| 6,991 | Python | 45.613333 | 171 | 0.595766 |
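An illustrative sketch (not part of the original file) of a lidar sensor that draws its rays for debugging and keeps the latest depth/zenith/azimuth buffers in its state dictionary; the prim path is hypothetical.
from pegasus.simulator.logic.sensors.lidar import Lidar
lidar_sensor = Lidar(
    "body/lidar",   # hypothetical prim path, resolved relative to the vehicle prim
    config={"rotation_rate": 20.0, "vertical_fov": 10.0, "draw_lines": True, "fill_state": True},
)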
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/magnetometer.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a magnetometer. Based on the original implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Elia Tarasov
"""
__all__ = ["Magnetometer"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.rotations import rot_ENU_to_NED, rot_FLU_to_FRD
from pegasus.simulator.logic.sensors.geo_mag_utils import (
get_mag_declination,
get_mag_inclination,
get_mag_strength,
reprojection,
)
class Magnetometer(Sensor):
"""The class that implements a magnetometer sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the Magnetometer class
Args:
config (dict): A Dictionary that contains all the parameters for configuring the Magnetometer - it can be empty or only have some of the parameters used by the Magnetometer.
Examples:
The dictionary default parameters are
>>> {"noise_density": 0.4e-3, # gauss / sqrt(hz)
>>> "random_walk": 6.4e-6, # gauss * sqrt(hz)
>>> "bias_correlation_time": 6.0e2, # s
>>> "update_rate": 250.0} # Hz
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="Magnetometer", update_rate=config.get("update_rate", 250.0))
# Set the noise parameters
self._bias: np.ndarray = np.array([0.0, 0.0, 0.0])
self._noise_density = config.get("noise_density", 0.4e-3) # gauss / sqrt(hz)
self._random_walk = config.get("random_walk", 6.4e-6) # gauss * sqrt(hz)
self._bias_correlation_time = config.get("bias_correlation_time", 6.0e2) # s
# Initial state measured by the Magnetometer
self._state = {"magnetic_field": np.zeros((3,))}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""Method that implements the logic of a magnetometer. In this method we start by computing the projection
of the vehicle body frame such in the elipsoidal model of the earth in order to get its current latitude and
longitude. From here the declination and inclination are computed and used to get the strength of the magnetic
field, expressed in the inertial frame of reference (in ENU convention). This magnetic field is then rotated
to the body frame such that it becomes expressed in a FRD body frame relative to a NED inertial reference frame.
(The convention adopted by PX4). Random noise and bias are added to this magnetic field.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Get the latitude and longitude from the current state
latitude, longitude = reprojection(state.position, np.radians(self._origin_lat), np.radians(self._origin_lon))
# Magnetic declination and inclination (radians)
declination_rad: float = np.radians(get_mag_declination(np.degrees(latitude), np.degrees(longitude)))
inclination_rad: float = np.radians(get_mag_inclination(np.degrees(latitude), np.degrees(longitude)))
# Compute the magnetic strength (10^5xnanoTesla)
strength_ga: float = 0.01 * get_mag_strength(np.degrees(latitude), np.degrees(longitude))
# Compute the Magnetic filed components according to: http://geomag.nrcan.gc.ca/mag_fld/comp-en.php
H: float = strength_ga * np.cos(inclination_rad)
Z: float = np.tan(inclination_rad) * H
X: float = H * np.cos(declination_rad)
Y: float = H * np.sin(declination_rad)
# Magnetic field of a body following a front-left-up (FLU) convention expressed in a East-North-Up (ENU) inertial frame
magnetic_field_inertial: np.ndarray = np.array([X, Y, Z])
# Rotate the magnetic field vector such that it expresses a field of a body frame according to the front-right-down (FRD)
# expressed in a North-East-Down (NED) inertial frame (the standard used in magnetometer units)
attitude_flu_enu = Rotation.from_quat(state.attitude)
# Rotate the magnetic field from the inertial frame to the body frame of reference according to the FLU frame convention
rot_body_to_world = rot_ENU_to_NED * attitude_flu_enu * rot_FLU_to_FRD.inv()
# The magnetic field expressed in the body frame according to the front-right-down (FRD) convention
magnetic_field_body = rot_body_to_world.inv().apply(magnetic_field_inertial)
# -------------------------------
# Add noise to the magnetic field
# -------------------------------
tau = self._bias_correlation_time
# Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt.
sigma_d: float = 1 / np.sqrt(dt) * self._noise_density
sigma_b: float = self._random_walk
# Compute exact covariance of the process after dt [Maybeck 4-114].
sigma_b_d: float = np.sqrt(-sigma_b * sigma_b * tau / 2.0 * (np.exp(-2.0 * dt / tau) - 1.0))
# Compute state-transition.
phi_d: float = np.exp(-1.0 / tau * dt)
# Add the noise to the magnetic field
magnetic_field_noisy: np.ndarray = np.zeros((3,))
for i in range(3):
self._bias[i] = phi_d * self._bias[i] + sigma_b_d * np.random.randn()
magnetic_field_noisy[i] = magnetic_field_body[i] + sigma_d * np.random.randn() + self._bias[i]
# Add the values to the dictionary and return it
self._state = {"magnetic_field": [magnetic_field_noisy[0], magnetic_field_noisy[1], magnetic_field_noisy[2]]}
return self._state
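# ---------------------------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original sensor): a single-axis version of the first-order
# Gauss-Markov noise model used in update() above. The helper name `simulate_noise_axis` and its default
# argument values are assumptions made purely for illustration.
# ---------------------------------------------------------------------------------------------------------------
def simulate_noise_axis(noise_density: float = 0.4e-3, random_walk: float = 6.4e-6,
                        bias_correlation_time: float = 6.0e2, dt: float = 1.0 / 250.0, steps: int = 5):
    """Minimal sketch: returns `steps` noisy samples around a zero-valued true signal."""
    bias = 0.0
    sigma_d = noise_density / np.sqrt(dt)        # discrete-time white-noise standard deviation
    sigma_b_d = np.sqrt(-random_walk ** 2 * bias_correlation_time / 2.0 * (np.exp(-2.0 * dt / bias_correlation_time) - 1.0))
    phi_d = np.exp(-dt / bias_correlation_time)  # state transition of the bias process
    samples = []
    for _ in range(steps):
        bias = phi_d * bias + sigma_b_d * np.random.randn()  # propagate the slowly-varying bias
        samples.append(sigma_d * np.random.randn() + bias)   # white noise + bias on top of the true value
    return samples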
| 6,384 | Python | 48.115384 | 185 | 0.649593 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/geo_mag_utils.py | """
| File: geo_mag_utils.py
| Description: Provides utilities for computing latitude, longitude, and magnetic strength
given the position of the vehicle in the simulated world. These computations and table constants are in agreement
with the PX4 sitl_gazebo implementation (https://github.com/PX4/PX4-SITL_gazebo). Therefore, PX4 should behave similarly
to a gazebo-based simulation.
"""
import numpy as np
# Declare which functions are visible from this file
__all__ = ["get_mag_declination", "get_mag_inclination", "get_mag_strength", "reprojection", "GRAVITY_VECTOR"]
# --------------------------------------------------------------------
# Magnetic field data from WMM2018 (10^5xnanoTesla (N, E D) n-frame )
# --------------------------------------------------------------------
# Declination data in degrees
DECLINATION_TABLE = [
[ 47,46,45,43,42,41,39,37,33,29,23,16,10,4,-1,-6,-10,-15,-20,-27,-34,-42,-49,-56,-62,-67,-72,-74,-75,-73,-61,-22,26,42,47,48,47 ],
[ 31,31,31,30,30,30,30,29,27,24,18,11,3,-4,-9,-13,-15,-18,-21,-27,-33,-40,-47,-52,-56,-57,-56,-52,-44,-30,-14,2,14,22,27,30,31 ],
[ 22,23,23,23,22,22,22,23,22,19,13,5,-4,-12,-17,-20,-22,-22,-23,-25,-30,-36,-41,-45,-46,-44,-39,-31,-21,-11,-3,4,10,15,19,21,22 ],
[ 17,17,17,18,17,17,17,17,16,13,8,-1,-10,-18,-22,-25,-26,-25,-22,-20,-21,-25,-29,-32,-31,-28,-23,-16,-9,-3,0,4,7,11,14,16,17 ],
[ 13,13,14,14,14,13,13,12,11,9,3,-5,-14,-20,-24,-25,-24,-21,-17,-12,-9,-11,-14,-17,-18,-16,-12,-8,-3,-0,1,3,6,8,11,12,13 ],
[ 11,11,11,11,11,10,10,10,9,6,-0,-8,-15,-21,-23,-22,-19,-15,-10,-5,-2,-2,-4,-7,-9,-8,-7,-4,-1,1,1,2,4,7,9,10,11 ],
[ 10,9,9,9,9,9,9,8,7,3,-3,-10,-16,-20,-20,-18,-14,-9,-5,-2,1,2,0,-2,-4,-4,-3,-2,-0,0,0,1,3,5,7,9,10 ],
[ 9,9,9,9,9,9,9,8,6,1,-4,-11,-16,-18,-17,-14,-10,-5,-2,-0,2,3,2,0,-1,-2,-2,-1,-0,-1,-1,-1,1,3,6,8,9 ],
[ 8,9,9,10,10,10,10,8,5,0,-6,-12,-15,-16,-15,-11,-7,-4,-1,1,3,4,3,2,1,0,-0,-0,-1,-2,-3,-4,-2,0,3,6,8 ],
[ 7,9,10,11,12,12,12,9,5,-1,-7,-13,-15,-15,-13,-10,-6,-3,0,2,3,4,4,4,3,2,1,0,-1,-3,-5,-6,-6,-3,0,4,7 ],
[ 5,8,11,13,14,15,14,11,5,-2,-9,-15,-17,-16,-13,-10,-6,-3,0,3,4,5,6,6,6,5,4,2,-1,-5,-8,-9,-9,-6,-3,1,5 ],
[ 3,8,11,15,17,17,16,12,5,-4,-12,-18,-19,-18,-16,-12,-8,-4,-0,3,5,7,9,10,10,9,7,4,-1,-6,-10,-12,-12,-9,-5,-1,3 ],
[ 3,8,12,16,19,20,18,13,4,-8,-18,-24,-25,-23,-20,-16,-11,-6,-1,3,7,11,14,16,17,17,14,8,-0,-8,-13,-15,-14,-11,-7,-2,3 ]]
# Inclination data in degrees
INCLINATION_TABLE = [
[ -78,-76,-74,-72,-70,-68,-65,-63,-60,-57,-55,-54,-54,-55,-56,-57,-58,-59,-59,-59,-59,-60,-61,-63,-66,-69,-73,-76,-79,-83,-86,-87,-86,-84,-82,-80,-78 ],
[ -72,-70,-68,-66,-64,-62,-60,-57,-54,-51,-49,-48,-49,-51,-55,-58,-60,-61,-61,-61,-60,-60,-61,-63,-66,-69,-72,-76,-78,-80,-81,-80,-79,-77,-76,-74,-72 ],
[ -64,-62,-60,-59,-57,-55,-53,-50,-47,-44,-41,-41,-43,-47,-53,-58,-62,-65,-66,-65,-63,-62,-61,-63,-65,-68,-71,-73,-74,-74,-73,-72,-71,-70,-68,-66,-64 ],
[ -55,-53,-51,-49,-46,-44,-42,-40,-37,-33,-30,-30,-34,-41,-48,-55,-60,-65,-67,-68,-66,-63,-61,-61,-62,-64,-65,-66,-66,-65,-64,-63,-62,-61,-59,-57,-55 ],
[ -42,-40,-37,-35,-33,-30,-28,-25,-22,-18,-15,-16,-22,-31,-40,-48,-55,-59,-62,-63,-61,-58,-55,-53,-53,-54,-55,-55,-54,-53,-51,-51,-50,-49,-47,-45,-42 ],
[ -25,-22,-20,-17,-15,-12,-10,-7,-3,1,3,2,-5,-16,-27,-37,-44,-48,-50,-50,-48,-44,-41,-38,-38,-38,-39,-39,-38,-37,-36,-35,-35,-34,-31,-28,-25 ],
[ -5,-2,1,3,5,8,10,13,16,20,21,19,12,2,-10,-20,-27,-30,-30,-29,-27,-23,-19,-17,-17,-17,-18,-18,-17,-16,-16,-16,-16,-15,-12,-9,-5 ],
[ 15,18,21,22,24,26,29,31,34,36,37,34,28,20,10,2,-3,-5,-5,-4,-2,2,5,7,8,7,7,6,7,7,7,6,5,6,8,11,15 ],
[ 31,34,36,38,39,41,43,46,48,49,49,46,42,36,29,24,20,19,20,21,23,25,28,30,30,30,29,29,29,29,28,27,25,25,26,28,31 ],
[ 43,45,47,49,51,53,55,57,58,59,59,56,53,49,45,42,40,40,40,41,43,44,46,47,47,47,47,47,47,47,46,44,42,41,40,42,43 ],
[ 53,54,56,57,59,61,64,66,67,68,67,65,62,60,57,55,55,54,55,56,57,58,59,59,60,60,60,60,60,60,59,57,55,53,52,52,53 ],
[ 62,63,64,65,67,69,71,73,75,75,74,73,70,68,67,66,65,65,65,66,66,67,68,68,69,70,70,71,71,70,69,67,65,63,62,62,62 ],
[ 71,71,72,73,75,77,78,80,81,81,80,79,77,76,74,73,73,73,73,73,73,74,74,75,76,77,78,78,78,78,77,75,73,72,71,71,71 ]]
# Strength data in centi-Gauss (equivalently, micro-Tesla)
STRENGTH_TABLE = [
[ 62,60,58,56,54,52,49,46,43,41,38,36,34,32,31,31,30,30,30,31,33,35,38,42,46,51,55,59,62,64,66,67,67,66,65,64,62 ],
[ 59,56,54,52,50,47,44,41,38,35,32,29,28,27,26,26,26,25,25,26,28,30,34,39,44,49,54,58,61,64,65,66,65,64,63,61,59 ],
[ 54,52,49,47,45,42,40,37,34,30,27,25,24,24,24,24,24,24,24,24,25,28,32,37,42,48,52,56,59,61,62,62,62,60,59,56,54 ],
[ 49,47,44,42,40,37,35,33,30,28,25,23,22,23,23,24,25,25,26,26,26,28,31,36,41,46,51,54,56,57,57,57,56,55,53,51,49 ],
[ 43,41,39,37,35,33,32,30,28,26,25,23,23,23,24,25,26,28,29,29,29,30,32,36,40,44,48,51,52,52,51,51,50,49,47,45,43 ],
[ 38,36,35,33,32,31,30,29,28,27,26,25,24,24,25,26,28,30,31,32,32,32,33,35,38,42,44,46,47,46,45,45,44,43,41,40,38 ],
[ 34,33,32,32,31,31,31,30,30,30,29,28,27,27,27,28,29,31,32,33,33,33,34,35,37,39,41,42,43,42,41,40,39,38,36,35,34 ],
[ 33,33,32,32,33,33,34,34,35,35,34,33,32,31,30,30,31,32,33,34,35,35,36,37,38,40,41,42,42,41,40,39,37,36,34,33,33 ],
[ 34,34,34,35,36,37,39,40,41,41,40,39,37,35,35,34,35,35,36,37,38,39,40,41,42,43,44,45,45,45,43,41,39,37,35,34,34 ],
[ 37,37,38,39,41,42,44,46,47,47,46,45,43,41,40,39,39,40,41,41,42,43,45,46,47,48,49,50,50,50,48,46,43,41,39,38,37 ],
[ 42,42,43,44,46,48,50,52,53,53,52,51,49,47,45,45,44,44,45,46,46,47,48,50,51,53,54,55,56,55,54,52,49,46,44,43,42 ],
[ 48,48,49,50,52,53,55,56,57,57,56,55,53,51,50,49,48,48,48,49,49,50,51,53,55,56,58,59,60,60,58,56,54,52,50,49,48 ],
[ 54,54,54,55,56,57,58,58,59,58,58,57,56,54,53,52,51,51,51,51,52,53,54,55,57,58,60,61,62,61,61,59,58,56,55,54,54 ]]
SAMPLING_RES = 10.0
SAMPLING_MIN_LAT = -60 # deg
SAMPLING_MAX_LAT = 60 # deg
SAMPLING_MIN_LON = -180 # deg
SAMPLING_MAX_LON = 180 # deg
EARTH_RADIUS = 6353000.0 # meters
# Gravity vector expressed in ENU
GRAVITY_VECTOR = np.array([0.0, 0.0, -9.80665]) # m/s^2
def get_lookup_table_index(val: int, min: int, max: int):
# for the rare case of hitting the bounds exactly
# the rounding logic wouldn't fit, so enforce it.
# limit to table bounds - required for maxima even when table spans full globe range
# limit to (table bounds - 1) because bilinear interpolation requires checking (index + 1)
val = np.clip(val, min, max - SAMPLING_RES)
return int((-min + val) / SAMPLING_RES)
def get_table_data(lat: float, lon: float, table):
# If the values exceed valid ranges, return zero as default
# as we have no way of knowing what the closest real value
# would be.
if lat < -90.0 or lat > 90.0 or lon < -180.0 or lon > 180.0:
return 0.0
# round down to nearest sampling resolution
min_lat = int(lat / SAMPLING_RES) * SAMPLING_RES
min_lon = int(lon / SAMPLING_RES) * SAMPLING_RES
# find index of nearest low sampling point
min_lat_index = get_lookup_table_index(min_lat, SAMPLING_MIN_LAT, SAMPLING_MAX_LAT)
min_lon_index = get_lookup_table_index(min_lon, SAMPLING_MIN_LON, SAMPLING_MAX_LON)
data_sw = table[min_lat_index][min_lon_index]
data_se = table[min_lat_index][min_lon_index + 1]
data_ne = table[min_lat_index + 1][min_lon_index + 1]
data_nw = table[min_lat_index + 1][min_lon_index]
# perform bilinear interpolation on the four grid corners
lat_scale = np.clip((lat - min_lat) / SAMPLING_RES, 0.0, 1.0)
lon_scale = np.clip((lon - min_lon) / SAMPLING_RES, 0.0, 1.0)
data_min = lon_scale * (data_se - data_sw) + data_sw
data_max = lon_scale * (data_ne - data_nw) + data_nw
return lat_scale * (data_max - data_min) + data_min
def get_mag_declination(latitude: float, longitude: float):
return get_table_data(latitude, longitude, DECLINATION_TABLE)
def get_mag_inclination(latitude: float, longitude: float):
return get_table_data(latitude, longitude, INCLINATION_TABLE)
def get_mag_strength(latitude: float, longitude: float):
return get_table_data(latitude, longitude, STRENGTH_TABLE)
def reprojection(position: np.ndarray, origin_lat=-999, origin_long=-999):
"""
Compute the latitude and longitude coordinates from a local position
"""
# reproject local position to gps coordinates
x_rad: float = position[1] / EARTH_RADIUS # north
y_rad: float = position[0] / EARTH_RADIUS # east
c: float = np.sqrt(x_rad * x_rad + y_rad * y_rad)
sin_c: float = np.sin(c)
cos_c: float = np.cos(c)
if c != 0.0:
latitude_rad = np.arcsin(cos_c * np.sin(origin_lat) + (x_rad * sin_c * np.cos(origin_lat)) / c)
longitude_rad = origin_long + np.arctan2(y_rad * sin_c, c * np.cos(origin_lat) * cos_c - x_rad * np.sin(origin_lat) * sin_c)
else:
latitude_rad = origin_lat
longitude_rad = origin_long
return latitude_rad, longitude_rad
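# ---------------------------------------------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the original module): querying the WMM tables for a position a few
# meters away from an arbitrary world origin. The origin coordinates below are assumptions used only for
# illustration.
# ---------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Origin of the simulated world, converted to radians as expected by reprojection()
    origin_lat_rad = np.radians(47.397742)
    origin_lon_rad = np.radians(8.545594)
    # Local ENU position 50 m east, 100 m north and 10 m above the origin
    lat_rad, lon_rad = reprojection(np.array([50.0, 100.0, 10.0]), origin_lat_rad, origin_lon_rad)
    lat_deg, lon_deg = np.degrees(lat_rad), np.degrees(lon_rad)
    print("declination [deg]:", get_mag_declination(lat_deg, lon_deg))
    print("inclination [deg]:", get_mag_inclination(lat_deg, lon_deg))
    print("strength (raw table value):", get_mag_strength(lat_deg, lon_deg))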
| 8,992 | Python | 58.953333 | 156 | 0.590747 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .sensor import Sensor
from .barometer import Barometer
from .gps import GPS
from .imu import IMU
from .magnetometer import Magnetometer
from .vision import Vision
from .camera import Camera
from .lidar import Lidar | 374 | Python | 27.846152 | 82 | 0.780749 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/sensor.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Sensor class which is used as the base for all the sensors.
"""
__all__ = ["Sensor"]
from pegasus.simulator.logic.state import State
class Sensor:
"""The base class for implementing a sensor
Attributes:
update_period (float): The period for each sensor update: update_period = 1 / update_rate (in s).
origin_lat (float): The latitude of the origin of the world in degrees (might get used by some sensors).
origin_lon (float): The longitude of the origin of the world in degrees (might get used by some sensors).
origin_alt (float): The altitude of the origin of the world relative to sea water level (might get used by some sensors)
"""
def __init__(self, sensor_type: str, update_rate: float):
"""Initialize the Sensor class
Args:
sensor_type (str): A name that describes the type of sensor
update_rate (float): The rate at which the data in the sensor should be refreshed (in Hz)
"""
# Set the sensor type and update rate
self._sensor_type = sensor_type
self._update_rate = update_rate
self._update_period = 1.0 / self._update_rate
        # Auxiliary variables used to control whether to update the sensor or not given the time elapsed
self._first_update = True
self._total_time = 0.0
# Set the "configuration of the world" - some sensors might need it
self._origin_lat = -999
self._origin_lon = -999
self._origin_alt = 0.0
# Path to a prim describing the sensor's frame
self.frame_path = ""
def initialize(self, origin_lat, origin_lon, origin_alt):
"""Method that initializes the sensor latitude, longitude and altitude attributes.
Note:
Given that some sensors require the knowledge of the latitude, longitude and altitude of the [0, 0, 0] coordinate
of the world, then we might as well just save this information for whatever sensor that comes
Args:
origin_lat (float): The latitude of the origin of the world in degrees (might get used by some sensors).
origin_lon (float): The longitude of the origin of the world in degrees (might get used by some sensors).
origin_alt (float): The altitude of the origin of the world relative to sea water level (might get used by some sensors).
"""
self._origin_lat = origin_lat
self._origin_lon = origin_lon
self._origin_alt = origin_alt
def set_update_rate(self, update_rate: float):
"""Method that changes the update rate and period of the sensor
Args:
update_rate (float): The new rate at which the data in the sensor should be refreshed (in Hz)
"""
self._update_rate = update_rate
self._update_period = 1.0 / self._update_rate
def update_at_rate(fnc):
"""Decorator function used to check if the time elapsed between the last sensor update call and the current
sensor update call is higher than the defined update_rate of the sensor. If so, we need to actually compute new
values to simulate a measurement of the sensor at a given rate.
Args:
fnc (function): The function that we want to enforce a specific update rate.
Examples:
>>> class GPS(Sensor):
>>> @Sensor.update_at_rate
>>> def update(self):
>>> (do some logic here)
Returns:
[None, Dict]: This decorator function returns None if there was no data to be produced by the sensor at the
specified timestamp or a dict with the current state of the sensor otherwise.
"""
#
# Define a wrapper function so that the "self" of the object can be passed to the function as well
def wrapper(self, state: State, dt: float):
# Add the total time passed between the last time the sensor was updated and the current call
self._total_time += dt
# If it is time to update the sensor data, then just call the update function of the sensor
if self._total_time >= self._update_period or self._first_update:
# Result of the update function for the sensor
result = fnc(self, state, self._total_time)
                # Reset the auxiliary counter variables
self._first_update = False
self._total_time = 0.0
return result
return None
return wrapper
@property
def sensor_type(self):
"""
(str) A name that describes the type of sensor.
"""
return self._sensor_type
@property
def update_rate(self):
"""
(float) The rate at which the data in the sensor should be refreshed (in Hz).
"""
return self._update_rate
@property
def state(self):
"""
(dict) A dictionary which contains the data produced by the sensor at any given time.
"""
return None
@property
def frame_path(self):
"""
(str) Path to the sensor's frame
"""
return self._frame_path
@frame_path.setter
def frame_path(self, value):
self._frame_path = value
def update(self, state: State, dt: float):
"""Method that should be implemented by the class that inherits Sensor. This is where the actual implementation
of the sensor should be performed.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
pass
def config_from_dict(self, config_dict):
"""Method that should be implemented by the class that inherits Sensor. This is where the configuration of the
sensor based on a dictionary input should be performed.
Args:
config_dict (dict): A dictionary containing the configurations of the sensor
"""
pass
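# ---------------------------------------------------------------------------------------------------------------
# Editor's hedged sketch (not part of the original file): a minimal Sensor subclass showing how the
# update_at_rate decorator is typically combined with the update() template above. The class name `DummySensor`
# and its output dictionary are assumptions used only for illustration.
# ---------------------------------------------------------------------------------------------------------------
class DummySensor(Sensor):
    """Toy sensor that simply reports the vehicle height at 10 Hz."""

    def __init__(self):
        super().__init__(sensor_type="DummySensor", update_rate=10.0)
        self._state = {"height": 0.0}

    @property
    def state(self):
        return self._state

    @Sensor.update_at_rate
    def update(self, state: State, dt: float):
        # The decorator only lets this body run once every update_period seconds
        self._state = {"height": state.position[2]}
        return self._state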
| 6,398 | Python | 38.257668 | 133 | 0.623163 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/barometer.py | """
| File: barometer.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a barometer. Based on the implementation provided in PX4 sitl_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Elia Tarasov.
| References: Both the original implementation provided in the gazebo based simulation and this one are based on the following article - 'A brief summary of atmospheric modeling', Cavcar, M., http://fisicaatmo.at.fcen.uba.ar/practicas/ISAweb.pdf
"""
__all__ = ["Barometer"]
import numpy as np
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.sensors.geo_mag_utils import GRAVITY_VECTOR
DEFAULT_HOME_ALT_AMSL = 488.0
class Barometer(Sensor):
"""The class that implements a barometer sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the Barometer class
Args:
config (dict): A Dictionary that contains all the parameters for configuring the Barometer - it can be empty or only have some of the parameters used by the Barometer.
Examples:
The dictionary default parameters are
>>> {"temperature_msl": 288.15, # temperature at MSL [K] (15 [C])
>>> "pressure_msl": 101325.0, # pressure at MSL [Pa]
>>> "lapse_rate": 0.0065, # reduction in temperature with altitude for troposphere [K/m]
>>> "air_density_msl": 1.225, # air density at MSL [kg/m^3]
>>> "absolute_zero": -273.15, # [C]
>>> "drift_pa_per_sec": 0.0, # Pa
>>> "update_rate": 250.0} # Hz
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="Barometer", update_rate=config.get("update_rate", 250.0))
self._z_start: float = None
# Setup the default home altitude (aka the altitude at the [0.0, 0.0, 0.0] coordinate on the simulated world)
# If desired, the user can override this default by calling the initialize() method defined inside the Sensor
# implementation
self._origin_alt = DEFAULT_HOME_ALT_AMSL
# Define the constants for the barometer
# International standard atmosphere (troposphere model - valid up to 11km) see [1]
self._TEMPERATURE_MSL: float = config.get("temperature_msl", 288.15) # temperature at MSL [K] (15 [C])
self._PRESSURE_MSL: float = config.get("pressure_msl", 101325.0) # pressure at MSL [Pa]
self._LAPSE_RATE: float = config.get(
"lapse_rate", 0.0065
) # reduction in temperature with altitude for troposphere [K/m]
self._AIR_DENSITY_MSL: float = config.get("air_density_msl", 1.225) # air density at MSL [kg/m^3]
self._ABSOLUTE_ZERO_C: float = config.get("absolute_zero", -273.15) # [C]
# Set the drift for the sensor
self._baro_drift_pa_per_sec: float = config.get("drift_pa_per_sec", 0.0)
        # Auxiliary variables for generating the noise
self._baro_rnd_use_last: bool = False
self._baro_rnd_y2: float = 0.0
self._baro_drift_pa: float = 0.0
        # Save the current state measured by the Barometer
self._state = {"absolute_pressure": 0.0, "pressure_altitude": 0.0, "temperature": 0.0}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""Method that implements the logic of a barometer. In this method we compute the relative altitude of the vehicle
        relative to the origin's altitude. Additionally, we compute the actual altitude of the vehicle, local temperature and
        absolute pressure, based on the reference - [A brief summary of atmospheric modeling, Cavcar, M., http://fisicaatmo.at.fcen.uba.ar/practicas/ISAweb.pdf]
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Set the initial altitude if not yet defined
if self._z_start is None:
self._z_start = state.position[2]
# Compute the temperature at the current altitude
alt_rel: float = state.position[2] - self._z_start
alt_amsl: float = self._origin_alt + alt_rel
temperature_local: float = self._TEMPERATURE_MSL - self._LAPSE_RATE * alt_amsl
# Compute the absolute pressure at local temperature
pressure_ratio: float = np.power(self._TEMPERATURE_MSL / temperature_local, 5.2561)
absolute_pressure: float = self._PRESSURE_MSL / pressure_ratio
        # Generate a Gaussian noise sequence using the polar form of the Box-Muller transformation
        # Honestly, this is overkill and will get replaced by numpy's random.randn.
        if not self._baro_rnd_use_last:
            w: float = 1.0
            while w >= 1.0:
                # The polar form requires uniform samples in [-1, 1) (np.random.rand), not Gaussian samples
                x1: float = 2.0 * np.random.rand() - 1.0
                x2: float = 2.0 * np.random.rand() - 1.0
w = (x1 * x1) + (x2 * x2)
w = np.sqrt((-2.0 * np.log(w)) / w)
y1: float = x1 * w
self._baro_rnd_y2 = x2 * w
self._baro_rnd_use_last = True
else:
y1: float = self._baro_rnd_y2
self._baro_rnd_use_last = False
# Apply noise and drift
abs_pressure_noise: float = y1 # 1 Pa RMS noise
self._baro_drift_pa = self._baro_drift_pa + (self._baro_drift_pa_per_sec * dt) # Update the drift
        absolute_pressure_noisy: float = absolute_pressure + abs_pressure_noise + self._baro_drift_pa  # use the integrated drift, not the drift rate
# Convert to hPa (Note: 1 hPa = 100 Pa)
absolute_pressure_noisy_hpa: float = absolute_pressure_noisy * 0.01
# Compute air density at local temperature
density_ratio: float = np.power(self._TEMPERATURE_MSL / temperature_local, 4.256)
air_density: float = self._AIR_DENSITY_MSL / density_ratio
# Compute pressure altitude including effect of pressure noise
pressure_altitude: float = alt_amsl - (abs_pressure_noise + self._baro_drift_pa) / (np.linalg.norm(GRAVITY_VECTOR) * air_density)
#pressure_altitude: float = alt_amsl - (abs_pressure_noise) / (np.linalg.norm(GRAVITY_VECTOR) * air_density)
# Compute temperature in celsius
temperature_celsius: float = temperature_local + self._ABSOLUTE_ZERO_C
# Add the values to the dictionary and return it
self._state = {
"absolute_pressure": absolute_pressure_noisy_hpa,
"pressure_altitude": pressure_altitude,
"temperature": temperature_celsius,
}
return self._state
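# ---------------------------------------------------------------------------------------------------------------
# Editor's hedged sketch (not part of the original file): the noise-free ISA troposphere model used in update(),
# written as a standalone helper. The helper name `isa_pressure` is an assumption used only for illustration.
# ---------------------------------------------------------------------------------------------------------------
def isa_pressure(alt_amsl: float, temperature_msl: float = 288.15, pressure_msl: float = 101325.0,
                 lapse_rate: float = 0.0065):
    """Return the absolute pressure [Pa] at a given altitude above mean sea level [m]."""
    temperature_local = temperature_msl - lapse_rate * alt_amsl
    pressure_ratio = np.power(temperature_msl / temperature_local, 5.2561)
    return pressure_msl / pressure_ratio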
| 7,189 | Python | 46.615894 | 245 | 0.626095 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/gps.py | """
| File: gps.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a gps. Based on the implementation provided in PX4 sitl_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Amy Wagoner and Nuno Marques
"""
__all__ = ["GPS"]
import numpy as np
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.sensors.geo_mag_utils import reprojection
# TODO - Introduce delay on the GPS data
class GPS(Sensor):
"""The class that implements a GPS sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the GPS class.
Args:
config (dict): A Dictionary that contains all the parameters for configuring the GPS - it can be empty or only have some of the parameters used by the GPS.
Examples:
The dictionary default parameters are
>>> {"fix_type": 3,
>>> "eph": 1.0,
>>> "epv": 1.0,
>>> "sattelites_visible": 10,
>>> "gps_xy_random_walk": 2.0, # (m/s) / sqrt(hz)
>>> "gps_z_random_walk": 4.0, # (m/s) / sqrt(hz)
>>> "gps_xy_noise_density": 2.0e-4, # (m) / sqrt(hz)
>>> "gps_z_noise_density": 4.0e-4, # (m) / sqrt(hz)
>>> "gps_vxy_noise_density": 0.2, # (m/s) / sqrt(hz)
>>> "gps_vz_noise_density": 0.4, # (m/s) / sqrt(hz)
>>> "gps_correlation_time": 60, # s
>>> "update_rate": 1.0 # Hz
>>> }
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="GPS", update_rate=config.get("update_rate", 250.0))
# Define the GPS simulated/fixed values
self._fix_type = config.get("fix_type", 3)
self._eph = config.get("eph", 1.0)
self._epv = config.get("epv", 1.0)
self._sattelites_visible = config.get("sattelites_visible", 10)
# Parameters for GPS random walk
self._random_walk_gps = np.array([0.0, 0.0, 0.0])
self._gps_xy_random_walk = config.get("gps_xy_random_walk", 2.0) # (m/s) / sqrt(hz)
self._gps_z_random_walk = config.get("gps_z_random_walk", 4.0) # (m/s) / sqrt(hz)
# Parameters for the position noise
self._noise_gps_pos = np.array([0.0, 0.0, 0.0])
self._gps_xy_noise_density = config.get("gps_xy_noise_density", 2.0e-4) # (m) / sqrt(hz)
self._gps_z_noise_density = config.get("gps_z_noise_density", 4.0e-4) # (m) / sqrt(hz)
# Parameters for the velocity noise
self._noise_gps_vel = np.array([0.0, 0.0, 0.0])
self._gps_vxy_noise_density = config.get("gps_vxy_noise_density", 0.2) # (m/s) / sqrt(hz)
self._gps_vz_noise_density = config.get("gps_vz_noise_density", 0.4) # (m/s) / sqrt(hz)
# Parameters for the GPS bias
self._gps_bias = np.array([0.0, 0.0, 0.0])
self._gps_correlation_time = config.get("gps_correlation_time", 60)
# Save the current state measured by the GPS (and initialize at the origin)
self._state = {
"latitude": np.radians(self._origin_lat),
"longitude": np.radians(self._origin_lon),
"altitude": self._origin_alt,
"eph": 1.0,
"epv": 1.0,
"speed": 0.0,
"velocity_north": 0.0,
"velocity_east": 0.0,
"velocity_down": 0.0,
# Constant values
"fix_type": self._fix_type,
"eph": self._eph,
"epv": self._epv,
"cog": 0.0,
"sattelites_visible": self._sattelites_visible,
"latitude_gt": np.radians(self._origin_lat),
"longitude_gt": np.radians(self._origin_lon),
"altitude_gt": self._origin_alt,
}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: np.ndarray, dt: float):
"""Method that implements the logic of a gps. In this method we start by generating the GPS bias terms which are then
added to the real position of the vehicle, expressed in ENU inertial frame. This position affected by noise
is reprojected in order to obtain the corresponding latitude and longitude. Additionally, to the linear velocity, noise
is added.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Update noise parameters
self._random_walk_gps[0] = self._gps_xy_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk_gps[1] = self._gps_xy_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk_gps[2] = self._gps_z_random_walk * np.sqrt(dt) * np.random.randn()
self._noise_gps_pos[0] = self._gps_xy_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_gps_pos[1] = self._gps_xy_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_gps_pos[2] = self._gps_z_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_gps_vel[0] = self._gps_vxy_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_gps_vel[1] = self._gps_vxy_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_gps_vel[2] = self._gps_vz_noise_density * np.sqrt(dt) * np.random.randn()
# Perform GPS bias integration (using euler integration -> to be improved)
self._gps_bias[0] = (
self._gps_bias[0] + self._random_walk_gps[0] * dt - self._gps_bias[0] / self._gps_correlation_time
)
self._gps_bias[1] = (
self._gps_bias[1] + self._random_walk_gps[1] * dt - self._gps_bias[1] / self._gps_correlation_time
)
self._gps_bias[2] = (
self._gps_bias[2] + self._random_walk_gps[2] * dt - self._gps_bias[2] / self._gps_correlation_time
)
# reproject position with noise into geographic coordinates
pos_with_noise: np.ndarray = state.position + self._noise_gps_pos + self._gps_bias
latitude, longitude = reprojection(pos_with_noise, np.radians(self._origin_lat), np.radians(self._origin_lon))
# Compute the values of the latitude and longitude without noise (for groundtruth measurements)
latitude_gt, longitude_gt = reprojection(
state.position, np.radians(self._origin_lat), np.radians(self._origin_lon)
)
# Add noise to the velocity expressed in the world frame
velocity: np.ndarray = state.linear_velocity # + self._noise_gps_vel
# Compute the xy speed
speed: float = np.linalg.norm(velocity[:2])
# Course over ground (NOT heading, but direction of movement),
# 0.0..359.99 degrees. If unknown, set to: 65535 [cdeg] (type:uint16_t)
ve = velocity[0]
vn = velocity[1]
cog = np.degrees(np.arctan2(ve, vn))
if cog < 0.0:
cog = cog + 360.0
cog = cog * 100
# Add the values to the dictionary and return it
self._state = {
"latitude": np.degrees(latitude),
"longitude": np.degrees(longitude),
"altitude": state.position[2] + self._origin_alt - self._noise_gps_pos[2] + self._gps_bias[2],
"eph": 1.0,
"epv": 1.0,
"speed": speed,
            # Conversion from ENU (the standard used by Isaac Sim) to NED (the standard used by GPS sensors)
"velocity_north": velocity[1],
"velocity_east": velocity[0],
"velocity_down": -velocity[2],
# Constant values
"fix_type": self._fix_type,
"eph": self._eph,
"epv": self._epv,
"cog": 0.0, # cog,
"sattelites_visible": self._sattelites_visible,
"latitude_gt": latitude_gt,
"longitude_gt": longitude_gt,
"altitude_gt": state.position[2] + self._origin_alt,
}
return self._state
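# ---------------------------------------------------------------------------------------------------------------
# Editor's hedged sketch (not part of the original file): the ENU -> NED velocity mapping used when filling the
# output dictionary above. The helper name `enu_to_ned_velocity` is an assumption used only for illustration.
# ---------------------------------------------------------------------------------------------------------------
def enu_to_ned_velocity(velocity_enu: np.ndarray):
    """Return (velocity_north, velocity_east, velocity_down) from an ENU velocity vector [east, north, up]."""
    return velocity_enu[1], velocity_enu[0], -velocity_enu[2]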
| 8,406 | Python | 43.481481 | 167 | 0.571259 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/imu.py | """
| File: imu.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates an imu. Based on the implementation provided in PX4 sitl_gazebo (https://github.com/PX4/PX4-SITL_gazebo)
"""
__all__ = ["IMU"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.rotations import rot_FLU_to_FRD, rot_ENU_to_NED
from pegasus.simulator.logic.sensors.geo_mag_utils import GRAVITY_VECTOR
class IMU(Sensor):
"""The class that implements the IMU sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the IMU class
Args:
            config (dict): A Dictionary that contains all the parameters for configuring the IMU - it can be empty or only have some of the parameters used by the IMU.
Examples:
The dictionary default parameters are
>>> {"gyroscope": {
>>> "noise_density": 2.0 * 35.0 / 3600.0 / 180.0 * pi,
>>> "random_walk": 2.0 * 4.0 / 3600.0 / 180.0 * pi,
>>> "bias_correlation_time": 1.0e3,
>>> "turn_on_bias_sigma": 0.5 / 180.0 * pi},
>>> "accelerometer": {
>>> "noise_density": 2.0 * 2.0e-3,
>>> "random_walk": 2.0 * 3.0e-3,
>>> "bias_correlation_time": 300.0,
>>> "turn_on_bias_sigma": 20.0e-3 * 9.8
>>> },
>>> "update_rate": 1.0} # Hz
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="IMU", update_rate=config.get("update_rate", 250.0))
# Orientation noise constant
self._orientation_noise: float = 0.0
# Gyroscope noise constants
self._gyroscope_bias: np.ndarray = np.zeros((3,))
gyroscope_config = config.get("gyroscope", {})
self._gyroscope_noise_density = gyroscope_config.get("noise_density", 0.0003393695767766752)
self._gyroscope_random_walk = gyroscope_config.get("random_walk", 3.878509448876288E-05)
self._gyroscope_bias_correlation_time = gyroscope_config.get("bias_correlation_time", 1.0E3)
self._gyroscope_turn_on_bias_sigma = gyroscope_config.get("turn_on_bias_sigma", 0.008726646259971648)
# Accelerometer noise constants
self._accelerometer_bias: np.ndarray = np.zeros((3,))
accelerometer_config = config.get("accelerometer", {})
self._accelerometer_noise_density = accelerometer_config.get("noise_density", 0.004)
self._accelerometer_random_walk = accelerometer_config.get("random_walk", 0.006)
self._accelerometer_bias_correlation_time = accelerometer_config.get("bias_correlation_time", 300.0)
self._accelerometer_turn_on_bias_sigma = accelerometer_config.get("turn_on_bias_sigma", 0.196)
        # Auxiliary variable used to compute the linear acceleration of the vehicle
self._prev_linear_velocity = np.zeros((3,))
# Save the current state measured by the IMU
self._state = {
"orientation": np.array([1.0, 0.0, 0.0, 0.0]),
"angular_velocity": np.array([0.0, 0.0, 0.0]),
"linear_acceleration": np.array([0.0, 0.0, 0.0]),
}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""Method that implements the logic of an IMU. In this method we start by generating the random walk of the
gyroscope. This value is then added to the real angular velocity of the vehicle (FLU relative to ENU inertial frame
expressed in FLU body frame). The same logic is followed for the accelerometer and the accelerations. After this step,
the angular velocity is rotated such that it expressed a FRD body frame, relative to a NED inertial frame, expressed
in the FRD body frame. Additionally, the acceleration is also rotated, such that it becomes expressed in the body
FRD frame of the vehicle. This sensor outputs data that follows the PX4 adopted standard.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Gyroscopic terms
        tau_g: float = self._gyroscope_bias_correlation_time
# Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt
sigma_g_d: float = 1 / np.sqrt(dt) * self._gyroscope_noise_density
sigma_b_g: float = self._gyroscope_random_walk
# Compute exact covariance of the process after dt [Maybeck 4-114]
sigma_b_g_d: float = np.sqrt(-sigma_b_g * sigma_b_g * tau_g / 2.0 * (np.exp(-2.0 * dt / tau_g) - 1.0))
# Compute state-transition
phi_g_d: float = np.exp(-1.0 / tau_g * dt)
# Simulate gyroscope noise processes and add them to the true angular rate.
angular_velocity: np.ndarray = np.zeros((3,))
for i in range(3):
self._gyroscope_bias[i] = phi_g_d * self._gyroscope_bias[i] + sigma_b_g_d * np.random.randn()
angular_velocity[i] = state.angular_velocity[i] + sigma_g_d * np.random.randn() + self._gyroscope_bias[i]
# Accelerometer terms
tau_a: float = self._accelerometer_bias_correlation_time
# Discrete-time standard deviation equivalent to an "integrating" sampler with integration time dt
sigma_a_d: float = 1.0 / np.sqrt(dt) * self._accelerometer_noise_density
sigma_b_a: float = self._accelerometer_random_walk
# Compute exact covariance of the process after dt [Maybeck 4-114].
sigma_b_a_d: float = np.sqrt(-sigma_b_a * sigma_b_a * tau_a / 2.0 * (np.exp(-2.0 * dt / tau_a) - 1.0))
# Compute state-transition.
phi_a_d: float = np.exp(-1.0 / tau_a * dt)
        # Compute the linear acceleration from differentiating the velocity of the vehicle expressed in the inertial frame
linear_acceleration_inertial = (state.linear_velocity - self._prev_linear_velocity) / dt
linear_acceleration_inertial = linear_acceleration_inertial - GRAVITY_VECTOR
# Update the previous linear velocity for the next computation
self._prev_linear_velocity = state.linear_velocity
# Compute the linear acceleration of the body frame, with respect to the inertial frame, expressed in the body frame
linear_acceleration = np.array(Rotation.from_quat(state.attitude).inv().apply(linear_acceleration_inertial))
        # Simulate the accelerometer noise processes and add them to the true linear acceleration values
        for i in range(3):
            self._accelerometer_bias[i] = phi_a_d * self._accelerometer_bias[i] + sigma_b_a_d * np.random.randn()
linear_acceleration[i] = (
linear_acceleration[i] + sigma_a_d * np.random.randn()
) #+ self._accelerometer_bias[i]
# TODO - Add small "noisy" to the attitude
# --------------------------------------------------------------------------------------------
# Apply rotations such that we express the IMU data according to the FRD body frame convention
# --------------------------------------------------------------------------------------------
# Convert the orientation to the FRD-NED standard
attitude_flu_enu = Rotation.from_quat(state.attitude)
attitude_frd_enu = attitude_flu_enu * rot_FLU_to_FRD
attitude_frd_ned = rot_ENU_to_NED * attitude_frd_enu
# Convert the angular velocity from FLU to FRD standard
angular_velocity_frd = rot_FLU_to_FRD.apply(angular_velocity)
# Convert the linear acceleration in the body frame from FLU to FRD standard
linear_acceleration_frd = rot_FLU_to_FRD.apply(linear_acceleration)
# Add the values to the dictionary and return it
self._state = {
"orientation": attitude_frd_ned.as_quat(),
"angular_velocity": angular_velocity_frd,
"linear_acceleration": linear_acceleration_frd,
}
return self._state
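# ---------------------------------------------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the original file): composing the FLU->FRD and ENU->NED rotations
# used in update() for an identity attitude. The printed values are for illustration only.
# ---------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    attitude_flu_enu = Rotation.identity()
    attitude_frd_ned = rot_ENU_to_NED * (attitude_flu_enu * rot_FLU_to_FRD)
    print("FRD-NED quaternion for an identity FLU-ENU attitude:", attitude_frd_ned.as_quat())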
| 8,652 | Python | 48.445714 | 167 | 0.624942 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/vision.py | """
| File: vision.py
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a visual odometry sensor. Based on the implementation provided in PX4 sitl_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Amy Wagoner and Nuno Marques
"""
__all__ = ["Vision"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.sensors import Sensor
class Vision(Sensor):
"""The class that implements a Vision sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the Vision class.
Args:
config (dict): A Dictionary that contains all the parameters for configuring the Vision - it can be empty or only have some of the parameters used by the Vision.
Examples:
The dictionary default parameters are
>>> {"reset_counter": 0,
>>> "vision_random_walk": 0.1, # (m/s) / sqrt(hz)
>>> "vision_noise_density": 0.01, # (m) / sqrt(hz)
>>> "vision_correlation_time": 60, # s
>>> "update_rate": 30.0 # Hz
>>> }
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="Vision", update_rate=config.get("update_rate", 30.0))
# Define the Vision simulated/fixed values
self._reset_counter = config.get("reset_counter", 0)
# Parameters for Vision random walk
self._random_walk = np.array([0.0, 0.0, 0.0])
self._vision_random_walk = config.get("vision_random_walk", 0.1)
# Parameters for Vision position and linear/angular velocity noise
self._noise_pos = np.array([0.0, 0.0, 0.0])
self._noise_linvel = np.array([0.0, 0.0, 0.0])
self._noise_angvel = np.array([0.0, 0.0, 0.0])
self._vision_noise_density = config.get("vision_noise_density", 0.01)
# Parameters for Vision bias
self._bias = np.array([0.0, 0.0, 0.0])
self._vision_correlation_time = config.get("vision_correlation_time", 60.0)
# Position covariance is constant, so prepare it in advance
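        # (Editor's note, assumption added for clarity: the 21 entries correspond to the row-major upper triangle
        # of a 6x6 pose covariance matrix - as used by MAVLink odometry messages - and the indices
        # 0, 6, 11, 15, 18, 20 are its diagonal entries.)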
self._vision_covariance = np.array(
[self._vision_noise_density * self._vision_noise_density if i in [0, 6, 11, 15, 18, 20] else 0.0 for i in range(21)],
dtype=float)
        # Save the current state measured by the vision sensor (and initialize at the origin)
self._state = {
"x": 0.0,
"y": 0.0,
"z": 0.0,
"roll": 0.0,
"pitch": 0.0,
"yaw": 0.0,
"covariance": self._vision_covariance,
"reset_counter": self._reset_counter,
}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: np.ndarray, dt: float):
"""Method that implements the logic of a visual odometry.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Update noise parameters
self._random_walk[0] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk[1] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk[2] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._noise_pos[0] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_pos[1] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_pos[2] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[0] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[1] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[2] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
tau_g = self._vision_correlation_time
sigma_g_d = 1 / np.sqrt(dt) * self._vision_noise_density
sigma_b_g = self._vision_random_walk
sigma_b_g_d = np.sqrt(-sigma_b_g * sigma_b_g * tau_g / 2.0 * (np.exp(-2.0 * dt / tau_g) - 1.0))
phi_g_d = np.exp(-1.0 / tau_g * dt)
self._noise_angvel[0] = phi_g_d * self._noise_angvel[0] + sigma_b_g_d * np.sqrt(dt) * np.random.randn() # self._noise_angvel[0] might need to be 0.0
self._noise_angvel[1] = phi_g_d * self._noise_angvel[1] + sigma_b_g_d * np.sqrt(dt) * np.random.randn()
self._noise_angvel[2] = phi_g_d * self._noise_angvel[2] + sigma_b_g_d * np.sqrt(dt) * np.random.randn()
# Perform Vision bias integration
self._bias[0] = (
self._bias[0] + self._random_walk[0] * dt - self._bias[0] / self._vision_correlation_time
)
self._bias[1] = (
self._bias[1] + self._random_walk[1] * dt - self._bias[1] / self._vision_correlation_time
)
self._bias[2] = (
self._bias[2] + self._random_walk[2] * dt - self._bias[2] / self._vision_correlation_time
)
# Get resulting values
position: np.ndarray = state.get_position_ned() + self._noise_pos + self._bias
orientation: np.ndarray = Rotation.from_quat(state.get_attitude_ned_frd()).as_euler('xyz', degrees=False)
linear_velocity: np.ndarray = state.get_linear_velocity_ned() + self._noise_linvel
angular_velocity: np.ndarray = state.get_angular_velocity_frd() + self._noise_angvel
self._state = {
"x": position[0],
"y": position[1],
"z": position[2],
"roll": orientation[0],
"pitch": orientation[1],
"yaw": orientation[2],
"covariance": self._vision_covariance,
"reset_counter": self._reset_counter,
}
return self._state
| 6,195 | Python | 42.943262 | 173 | 0.580791 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/interface/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .pegasus_interface import PegasusInterface
| 203 | Python | 28.142853 | 82 | 0.758621 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/interface/pegasus_interface.py | """
| File: pegasus_interface.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the PegasusInterface class (a singleton) that is used to manage the Pegasus framework.
"""
__all__ = ["PegasusInterface"]
# Importing Lock in order to have a multithread-safe Pegasus singleton that manages the entire Pegasus extension
import gc
import yaml
import asyncio
import os
from threading import Lock
# NVidia API imports
import carb
from omni.isaac.core.world import World
from omni.isaac.core.utils.stage import clear_stage
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.core.utils.nucleus as nucleus
# Pegasus Simulator internal API
from pegasus.simulator.params import DEFAULT_WORLD_SETTINGS, SIMULATION_ENVIRONMENTS, CONFIG_FILE
from pegasus.simulator.logic.vehicle_manager import VehicleManager
class PegasusInterface:
"""
    PegasusInterface is a singleton class (there is only one object instance at any given time) that is used
    to manage the Pegasus Simulator framework: the simulation world, the spawned vehicles and the global simulation settings.
"""
# The object instance of the Vehicle Manager
_instance = None
_is_initialized = False
# Lock for safe multi-threading
_lock: Lock = Lock()
def __init__(self):
"""
Initialize the PegasusInterface singleton object (only runs once at a time)
"""
# If we already have an instance of the PegasusInterface, do not overwrite it!
if PegasusInterface._is_initialized:
return
carb.log_info("Initializing the Pegasus Simulator Extension")
PegasusInterface._is_initialized = True
# Get a handle to the vehicle manager instance which will manage which vehicles are spawned in the world
# to be controlled and simulated
self._vehicle_manager = VehicleManager()
# Initialize the world with the default simulation settings
self._world_settings = DEFAULT_WORLD_SETTINGS
self._world = None
#self.initialize_world()
# Initialize the latitude, longitude and altitude of the simulated environment at the (0.0, 0.0, 0.0) coordinate
# from the extension configuration file
self._latitude, self._longitude, self._altitude = self._get_global_coordinates_from_config()
# Get the px4_path from the extension configuration file
self._px4_path: str = self._get_px4_path_from_config()
carb.log_info("Default PX4 path:" + str(self._px4_path))
@property
def world(self):
"""The current omni.isaac.core.world World instance
Returns:
omni.isaac.core.world: The world instance
"""
return self._world
@property
def vehicle_manager(self):
"""The instance of the VehicleManager.
Returns:
VehicleManager: The current instance of the VehicleManager.
"""
return self._vehicle_manager
@property
def latitude(self):
"""The latitude of the origin of the simulated world in degrees.
Returns:
float: The latitude of the origin of the simulated world in degrees.
"""
return self._latitude
@property
def longitude(self):
"""The longitude of the origin of the simulated world in degrees.
Returns:
float: The longitude of the origin of the simulated world in degrees.
"""
return self._longitude
@property
def altitude(self):
"""The altitude of the origin of the simulated world in meters.
Returns:
            float: The altitude of the origin of the simulated world in meters.
"""
return self._altitude
@property
def px4_path(self):
"""A string with the installation directory for PX4 (if it was setup). Otherwise it is None.
Returns:
str: A string with the installation directory for PX4 (if it was setup). Otherwise it is None.
"""
return self._px4_path
def set_global_coordinates(self, latitude=None, longitude=None, altitude=None):
"""Method that can be used to set the latitude, longitude and altitude of the simulation world at the origin.
Args:
latitude (float): The latitude of the origin of the simulated world in degrees. Defaults to None.
longitude (float): The longitude of the origin of the simulated world in degrees. Defaults to None.
altitude (float): The altitude of the origin of the simulated world in meters. Defaults to None.
"""
if latitude is not None:
self._latitude = latitude
if longitude is not None:
self._longitude = longitude
        if altitude is not None:
self._altitude = altitude
carb.log_warn("New global coordinates set to: " + str(self._latitude) + ", " + str(self._longitude) + ", " + str(self._altitude))
def initialize_world(self):
"""Method that initializes the world object
"""
self._world = World(**self._world_settings)
#asyncio.ensure_future(self._world.initialize_simulation_context_async())
def get_vehicle(self, stage_prefix: str):
"""Method that returns the vehicle object given its 'stage_prefix', i.e., the name the vehicle was spawned with in the simulator.
Args:
stage_prefix (str): The name the vehicle will present in the simulator when spawned.
Returns:
Vehicle: Returns a vehicle object that was spawned with the given 'stage_prefix'
"""
return self._vehicle_manager.vehicles[stage_prefix]
def get_all_vehicles(self):
"""
Method that returns a list of vehicles that are considered active in the simulator
Returns:
list: A list of all vehicles that are currently instantiated.
"""
return self._vehicle_manager.vehicles
def get_default_environments(self):
"""
Method that returns a dictionary containing all the default simulation environments and their path
"""
return SIMULATION_ENVIRONMENTS
def generate_quadrotor_config_from_yaml(self, file: str):
"""_summary_
Args:
file (str): _description_
Returns:
_type_: _description_
"""
# Load the quadrotor configuration data from the given yaml file
with open(file) as f:
data = yaml.safe_load(f)
return self.generate_quadrotor_config_from_dict(data)
def clear_scene(self):
"""
Method that when invoked will clear all vehicles and the simulation environment, leaving only an empty world with a physics environment.
"""
# If the physics simulation was running, stop it first
if self.world is not None:
self.world.stop()
# Clear the world
if self.world is not None:
self.world.clear_all_callbacks()
self.world.clear()
# Clear the stage
clear_stage()
# Remove all the robots that were spawned
self._vehicle_manager.remove_all_vehicles()
# Call python's garbage collection
gc.collect()
# Re-initialize the physics context
asyncio.ensure_future(self._world.initialize_simulation_context_async())
carb.log_info("Current scene and its vehicles has been deleted")
async def load_environment_async(self, usd_path: str, force_clear: bool=False):
"""Method that loads a given world (specified in the usd_path) into the simulator asynchronously.
Args:
usd_path (str): The path where the USD file describing the world is located.
force_clear (bool): Whether to perform a clear before loading the asset. Defaults to False.
"""
# Reset and pause the world simulation (only if force_clear is true)
# This is done to maximize the support between running in GUI as extension vs App
if force_clear == True:
await self.world.reset_async()
await self.world.stop_async()
# Load the USD asset that will be used for the environment
try:
self.load_asset(usd_path, "/World/layout")
except Exception as e:
carb.log_warn("Could not load the desired environment: " + str(e))
carb.log_info("A new environment has been loaded successfully")
def load_environment(self, usd_path: str, force_clear: bool=False):
"""Method that loads a given world (specified in the usd_path) into the simulator. If invoked from a python app,
this method should have force_clear=False, as the world reset and stop are performed asynchronously by this method,
and when we are operating in App mode, we want everything to run in sync.
Args:
usd_path (str): The path where the USD file describing the world is located.
force_clear (bool): Whether to perform a clear before loading the asset. Defaults to False.
"""
asyncio.ensure_future(self.load_environment_async(usd_path, force_clear))
def load_nvidia_environment(self, environment_asset: str = "Hospital/hospital.usd"):
"""
        Method that is used to load NVidia internally provided USD stages into the simulation World
Args:
environment_asset (str): The name of the nvidia asset inside the /Isaac/Environments folder. Default to Hospital/hospital.usd.
"""
# Get the nvidia assets root path
nvidia_assets_path = nucleus.get_assets_root_path()
# Define the environments path inside the NVidia assets
environments_path = "/Isaac/Environments"
# Get the complete usd path
usd_path = nvidia_assets_path + environments_path + "/" + environment_asset
# Try to load the asset into the world
self.load_asset(usd_path, "/World/layout")
def load_asset(self, usd_asset: str, stage_prefix: str):
"""
Method that will attempt to load an asset into the current simulation world, given the USD asset path.
Args:
usd_asset (str): The path where the USD file describing the world is located.
stage_prefix (str): The name the vehicle will present in the simulator when spawned.
"""
# Try to check if there is already a prim with the same stage prefix in the stage
if self._world.stage.GetPrimAtPath(stage_prefix):
raise Exception("A primitive already exists at the specified path")
# Create the stage primitive and load the usd into it
prim = self._world.stage.DefinePrim(stage_prefix)
success = prim.GetReferences().AddReference(usd_asset)
if not success:
raise Exception("The usd asset" + usd_asset + "is not load at stage path " + stage_prefix)
def set_viewport_camera(self, camera_position, camera_target):
"""Sets the viewport camera to given position and makes it point to another target position.
Args:
camera_position (list): A list with [X, Y, Z] coordinates of the camera in ENU inertial frame.
camera_target (list): A list with [X, Y, Z] coordinates of the target that the camera should point to in the ENU inertial frame.
"""
# Set the camera view to a fixed value
set_camera_view(eye=camera_position, target=camera_target)
def set_world_settings(self, physics_dt=None, stage_units_in_meters=None, rendering_dt=None):
"""
Set the current world settings to the pre-defined settings. TODO - finish the implementation of this method.
        For now these new settings will never override the default ones.
"""
# Set the physics engine update rate
if physics_dt is not None:
self._world_settings["physics_dt"] = physics_dt
# Set the units of the simulator to meters
if stage_units_in_meters is not None:
self._world_settings["stage_units_in_meters"] = stage_units_in_meters
# Set the render engine update rate (might not be the same as the physics engine)
if rendering_dt is not None:
self._world_settings["rendering_dt"] = rendering_dt
def _get_px4_path_from_config(self):
"""
Method that reads the configured PX4 installation directory from the extension configuration file
Returns:
str: A string with the path to the px4 configuration directory or empty string ''
"""
px4_dir = ""
# Open the configuration file. If it fails, just return the empty path
try:
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
px4_dir = os.path.expanduser(data.get("px4_dir", None))
except:
carb.log_warn("Could not retrieve px4_dir from: " + str(CONFIG_FILE))
return px4_dir
def _get_global_coordinates_from_config(self):
"""Method that reads the default latitude, longitude and altitude from the extension configuration file
Returns:
(float, float, float): A tuple of 3 floats with the latitude, longitude and altitude to use as the origin of the world
"""
latitude = 0.0
longitude = 0.0
altitude = 0.0
# Open the configuration file. If it fails, just return the empty path
try:
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Try to read the coordinates from the configuration file
global_coordinates = data.get("global_coordinates", {})
latitude = global_coordinates.get("latitude", 0.0)
longitude = global_coordinates.get("longitude", 0.0)
altitude = global_coordinates.get("altitude", 0.0)
except:
carb.log_warn("Could not retrieve the global coordinates from: " + str(CONFIG_FILE))
return (latitude, longitude, altitude)
def set_px4_path(self, path: str):
"""Method that allows a user to save a new px4 directory in the configuration files of the extension.
Args:
            path (str): The new path of the px4-autopilot installation directory
"""
# Save the new path for current use during this simulation
self._px4_path = os.path.expanduser(path)
# Save the new path in the configurations file for the next simulations
try:
# Open the configuration file and the all the configurations that it contains
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Open the configuration file. If it fails, just warn in the console
with open(CONFIG_FILE, 'w') as f:
data["px4_dir"] = path
yaml.dump(data, f)
except:
carb.log_warn("Could not save px4_dir to: " + str(CONFIG_FILE))
carb.log_warn("New px4_dir set to: " + str(self._px4_path))
def set_default_global_coordinates(self):
"""
Method that sets the latitude, longitude and altitude from the pegasus interface to the
default global coordinates specified in the extension configuration file
"""
self._latitude, self._longitude, self._altitude = self._get_global_coordinates_from_config()
def set_new_default_global_coordinates(self, latitude: float=None, longitude: float=None, altitude: float=None):
# Set the current global coordinates to the new default global coordinates
self.set_global_coordinates(latitude, longitude, altitude)
# Update the default global coordinates in the configuration file
try:
# Open the configuration file and the all the configurations that it contains
with open(CONFIG_FILE, 'r') as f:
data = yaml.safe_load(f)
# Open the configuration file. If it fails, just warn in the console
with open(CONFIG_FILE, 'w') as f:
if latitude is not None:
data["global_coordinates"]["latitude"] = latitude
if longitude is not None:
data["global_coordinates"]["longitude"] = longitude
if altitude is not None:
data["global_coordinates"]["altitude"] = altitude
# Save the updated configurations
yaml.dump(data, f)
except:
carb.log_warn("Could not save the new global coordinates to: " + str(CONFIG_FILE))
carb.log_warn("New global coordinates set to: latitude=" + str(latitude) + ", longitude=" + str(longitude) + ", altitude=" + str(altitude))
def __new__(cls):
"""Allocates the memory and creates the actual PegasusInterface object is not instance exists yet. Otherwise,
returns the existing instance of the PegasusInterface class.
Returns:
VehicleManger: the single instance of the VehicleManager class
"""
# Use a lock in here to make sure we do not have a race condition
# when using multi-threading and creating the first instance of the Pegasus extension manager
with cls._lock:
if cls._instance is None:
cls._instance = object.__new__(cls)
return PegasusInterface._instance
def __del__(self):
"""Destructor for the object. Destroys the only existing instance of this class."""
PegasusInterface._instance = None
PegasusInterface._is_initialized = False | 17,722 | Python | 38.384444 | 147 | 0.638698 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/backends/backend.py | """
| File: backend.py
| Author: Marcelo Jacinto ([email protected])
| Description:
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
class Backend:
"""
This class defines the template for a communication and control backend. Every vehicle can have one or more backends
running at the same time. Every timestep, the methods 'update_state' and 'update_sensor' are called to update the data produced
by the simulation, i.e. at every time step the backend receives the current state of the vehicle and its sensor readings.
Additionally, the backend must provide a method named 'input_reference' which will be used by the vehicle simulation
to know the desired angular velocities to apply to the rotors of the vehicle. The method 'update' is called on every
physics step and can be used to implement some logic or send data to another interface (such as PX4 through mavlink or ROS2).
The methods 'start', 'stop' and 'reset' are callbacks that get called when the simulation is started, stopped and reset, as the names imply.
"""
def __init__(self):
"""Initialize the Backend class
"""
self._vehicle = None
"""
Properties
"""
@property
def vehicle(self):
"""A reference to the vehicle associated with this backend.
Returns:
Vehicle: A reference to the vehicle associated with this backend.
"""
return self._vehicle
def initialize(self, vehicle):
"""A method that can be invoked when the simulation is starting to give access to the control backend
to the entire vehicle object. Even though we provide update_sensor and update_state callbacks that are called
at every physics step with the latest vehicle state and its sensor data, having access to the full vehicle
object may prove useful under some circumstances. This is nice to give users the possibility of overriding
default vehicle behaviour via this control backend structure.
Args:
vehicle (Vehicle): A reference to the vehicle that this sensor is associated with
"""
self._vehicle = vehicle
def update_sensor(self, sensor_type: str, data):
"""Method that when implemented, should handle the receival of sensor data
Args:
sensor_type (str): A name that describes the type of sensor
data (dict): A dictionary that contains the data produced by the sensor
"""
pass
def update_state(self, state):
"""Method that when implemented, should handle the receival of the state of the vehicle using this callback
Args:
state (State): The current state of the vehicle.
"""
pass
def input_reference(self):
"""Method that when implemented, should return a list of desired angular velocities to apply to the vehicle rotors
"""
return []
def update(self, dt: float):
"""Method that when implemented, should be used to update the state of the backend and the information being sent/received
from the communication interface. This method will be called by the simulation on every physics step
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
pass
def start(self):
"""Method that when implemented should handle the begining of the simulation of vehicle
"""
pass
def stop(self):
"""Method that when implemented should handle the stopping of the simulation of vehicle
"""
pass
def reset(self):
"""Method that when implemented, should handle the reset of the vehicle simulation to its original state
"""
pass
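# Illustrative sketch (not part of the original API): a minimal custom backend that caches the latest
# vehicle state and always requests the same angular velocity on every rotor. The class name, the
# 400.0 rad/s value and the assumption of 4 rotors are arbitrary choices made for this example only.
class ConstantSpeedBackend(Backend):
    """Example backend that ignores sensor data and always commands a fixed rotor speed."""

    def __init__(self, target_speed: float = 400.0, num_rotors: int = 4):
        super().__init__()
        self._target_speed = target_speed
        self._num_rotors = num_rotors
        self._last_state = None

    def update_state(self, state):
        # Cache the latest vehicle state received from the simulation
        self._last_state = state

    def input_reference(self):
        # Request the same angular velocity (rad/s) for every rotor
        return [self._target_speed for _ in range(self._num_rotors)]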
| 3,830 | Python | 40.193548 | 143 | 0.674935 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/backends/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .backend import Backend
from .mavlink_backend import MavlinkBackend, MavlinkBackendConfig
from .ros2_backend import ROS2Backend
| 288 | Python | 31.111108 | 82 | 0.784722 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/backends/ros2_backend.py | """
| File: ros2_backend.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the ROS2 Backend for communication/control with/of the vehicle simulation through ROS2 topics
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
import carb
from omni.isaac.core.utils.extensions import disable_extension, enable_extension
# Perform some checks, because Isaac Sim sometimes does not play nice when using ROS/ROS2
disable_extension("omni.isaac.ros_bridge")
enable_extension("omni.isaac.ros2_bridge")
# Now we can actually import the ROS2 dependencies
# Note: we are performing the imports here to make sure that the ROS2 extension was loaded correctly
import rclpy
from std_msgs.msg import Float64
from sensor_msgs.msg import Imu, MagneticField, NavSatFix, NavSatStatus
from geometry_msgs.msg import PoseStamped, TwistStamped, AccelStamped
import omni.kit.app
from pegasus.simulator.logic.backends.backend import Backend
class ROS2Backend(Backend):
def __init__(self, vehicle_id: int, num_rotors=4):
# Save the configurations for this backend
self._id = vehicle_id
self._num_rotors = num_rotors
# Start the actual ROS2 setup here
rclpy.init()
self.node = rclpy.create_node("vehicle_" + str(vehicle_id))
# Create publishers for the state of the vehicle in ENU
self.pose_pub = self.node.create_publisher(PoseStamped, "vehicle" + str(self._id) + "/state/pose", 10)
self.twist_pub = self.node.create_publisher(TwistStamped, "vehicle" + str(self._id) + "/state/twist", 10)
self.twist_inertial_pub = self.node.create_publisher(TwistStamped, "vehicle" + str(self._id) + "/state/twist_inertial", 10)
self.accel_pub = self.node.create_publisher(AccelStamped, "vehicle" + str(self._id) + "/state/accel", 10)
# Create publishers for some sensor data
self.imu_pub = self.node.create_publisher(Imu, "vehicle" + str(self._id) + "/sensors/imu", 10)
self.mag_pub = self.node.create_publisher(MagneticField, "vehicle" + str(self._id) + "/sensors/mag", 10)
self.gps_pub = self.node.create_publisher(NavSatFix, "vehicle" + str(self._id) + "/sensors/gps", 10)
self.gps_vel_pub = self.node.create_publisher(TwistStamped, "vehicle" + str(self._id) + "/sensors/gps_twist", 10)
# Subscribe to vector of floats with the target angular velocities to control the vehicle
# This is not ideal, but we need to reach out to NVIDIA so that they can improve the ROS2 support with custom messages
# The current setup, as it is, is a pain to work with.
self.rotor_subs = []
for i in range(self._num_rotors):
# Note: bind the current value of 'i' through a default argument, otherwise every callback would use the last rotor index
self.rotor_subs.append(self.node.create_subscription(Float64, "vehicle" + str(self._id) + "/control/rotor" + str(i) + "/ref", lambda msg, rotor_id=i: self.rotor_callback(msg, rotor_id), 10))
# Setup zero input reference for the thrusters
self.input_ref = [0.0 for i in range(self._num_rotors)]
def update_state(self, state):
"""
Method that handles the reception of the state of the vehicle and publishes it to the corresponding ROS2 topics
"""
pose = PoseStamped()
twist = TwistStamped()
twist_inertial = TwistStamped()
accel = AccelStamped()
# Update the header
pose.header.stamp = self.node.get_clock().now().to_msg()
twist.header.stamp = pose.header.stamp
twist_inertial.header.stamp = pose.header.stamp
accel.header.stamp = pose.header.stamp
pose.header.frame_id = "world"
twist.header.frame_id = "base_link"
twist_inertial.header.frame_id = "world"
accel.header.frame_id = "world"
# Fill the position and attitude of the vehicle in ENU
pose.pose.position.x = state.position[0]
pose.pose.position.y = state.position[1]
pose.pose.position.z = state.position[2]
pose.pose.orientation.x = state.attitude[0]
pose.pose.orientation.y = state.attitude[1]
pose.pose.orientation.z = state.attitude[2]
pose.pose.orientation.w = state.attitude[3]
# Fill the linear and angular velocities in the body frame of the vehicle
twist.twist.linear.x = state.linear_body_velocity[0]
twist.twist.linear.y = state.linear_body_velocity[1]
twist.twist.linear.z = state.linear_body_velocity[2]
twist.twist.angular.x = state.angular_velocity[0]
twist.twist.angular.y = state.angular_velocity[1]
twist.twist.angular.z = state.angular_velocity[2]
# Fill the linear velocity of the vehicle in the inertial frame
twist_inertial.twist.linear.x = state.linear_velocity[0]
twist_inertial.twist.linear.y = state.linear_velocity[1]
twist_inertial.twist.linear.z = state.linear_velocity[2]
# Fill the linear acceleration in the inertial frame
accel.accel.linear.x = state.linear_acceleration[0]
accel.accel.linear.y = state.linear_acceleration[1]
accel.accel.linear.z = state.linear_acceleration[2]
# Publish the messages containing the state of the vehicle
self.pose_pub.publish(pose)
self.twist_pub.publish(twist)
self.twist_inertial_pub.publish(twist_inertial)
self.accel_pub.publish(accel)
def rotor_callback(self, ros_msg: Float64, rotor_id):
# Update the reference for the rotor of the vehicle
self.input_ref[rotor_id] = float(ros_msg.data)
def update_sensor(self, sensor_type: str, data):
"""
Method that handles the reception of sensor data and publishes it to the corresponding ROS2 topics
"""
if sensor_type == "IMU":
self.update_imu_data(data)
elif sensor_type == "GPS":
self.update_gps_data(data)
elif sensor_type == "Magnetometer":
self.update_mag_data(data)
elif sensor_type == "Barometer": # TODO - create a topic for the barometer later on
pass
def update_imu_data(self, data):
msg = Imu()
# Update the header
msg.header.stamp = self.node.get_clock().now().to_msg()
msg.header.frame_id = "base_link_frd"
# Update the angular velocity (NED + FRD)
msg.angular_velocity.x = data["angular_velocity"][0]
msg.angular_velocity.y = data["angular_velocity"][1]
msg.angular_velocity.z = data["angular_velocity"][2]
# Update the linear acceleration (NED)
msg.linear_acceleration.x = data["linear_acceleration"][0]
msg.linear_acceleration.y = data["linear_acceleration"][1]
msg.linear_acceleration.z = data["linear_acceleration"][2]
# Publish the message with the current imu state
self.imu_pub.publish(msg)
def update_gps_data(self, data):
msg = NavSatFix()
msg_vel = TwistStamped()
# Update the headers
msg.header.stamp = self.node.get_clock().now().to_msg()
msg.header.frame_id = "world_ned"
msg_vel.header.stamp = msg.header.stamp
msg_vel.header.frame_id = msg.header.frame_id
# Update the status of the GPS
status_msg = NavSatStatus()
status_msg.status = 0 # unaugmented fix position
status_msg.service = 1 # GPS service
msg.status = status_msg
# Update the latitude, longitude and altitude
msg.latitude = data["latitude"]
msg.longitude = data["longitude"]
msg.altitude = data["altitude"]
# Update the velocity of the vehicle measured by the GPS in the inertial frame (NED)
msg_vel.twist.linear.x = data["velocity_north"]
msg_vel.twist.linear.y = data["velocity_east"]
msg_vel.twist.linear.z = data["velocity_down"]
# Publish the message with the current GPS state
self.gps_pub.publish(msg)
self.gps_vel_pub.publish(msg_vel)
def update_mag_data(self, data):
msg = MagneticField()
# Update the headers
msg.header.stamp = self.node.get_clock().now().to_msg()
msg.header.frame_id = "base_link_frd"
msg.magnetic_field.x = data["magnetic_field"][0]
msg.magnetic_field.y = data["magnetic_field"][1]
msg.magnetic_field.z = data["magnetic_field"][2]
# Publish the message with the current magnetic data
self.mag_pub.publish(msg)
def input_reference(self):
"""
Method that is used to return the latest target angular velocities to be applied to the vehicle
Returns:
A list with the target angular velocities for each individual rotor of the vehicle
"""
return self.input_ref
def update(self, dt: float):
"""
Method that when implemented, should be used to update the state of the backend and the information being sent/received
from the communication interface. This method will be called by the simulation on every physics step
"""
# In this case, do nothing as we are sending messages as soon as new data arrives from the sensors and state
# and updating the reference for the thrusters as soon as receiving from ROS2 topics
# Just poll for new ROS 2 messages in a non-blocking way
rclpy.spin_once(self.node, timeout_sec=0)
def start(self):
"""
Method that handles the beginning of the simulation of the vehicle
"""
# Reset the reference for the thrusters
self.input_ref = [0.0 for i in range(self._num_rotors)]
def stop(self):
"""
Method that handles the stopping of the simulation of the vehicle
"""
# Reset the reference for the thrusters
self.input_ref = [0.0 for i in range(self._num_rotors)]
def reset(self):
"""
Method that when implemented, should handle the reset of the vehicle simulation to its original state
"""
# Reset the reference for the thrusters
self.input_ref = [0.0 for i in range(self._num_rotors)]
def check_ros_extension(self):
"""
Method that checks which ROS extension is installed.
"""
# Get the handle for the extension manager
extension_manager = omni.kit.app.get_app().get_extension_manager()
version = ""
if extension_manager.is_extension_enabled("omni.isaac.ros_bridge"):
version = "ros"
elif extension_manager.is_extension_enabled("omni.isaac.ros2_bridge"):
version = "ros2"
else:
carb.log_warn("Neither extension 'omni.isaac.ros_bridge' nor 'omni.isaac.ros2_bridge' is enabled")
return version
| 10,744 | Python | 40.972656 | 178 | 0.648827 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/backends/mavlink_backend.py | """
| File: mavlink_backend.py
| Author: Marcelo Jacinto ([email protected])
| Description: File that implements the Mavlink Backend for communication/control with/of the vehicle simulation
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
__all__ = ["MavlinkBackend", "MavlinkBackendConfig"]
import carb
import time
import numpy as np
from pymavlink import mavutil
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.backends.backend import Backend
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.backends.tools.px4_launch_tool import PX4LaunchTool
class SensorSource:
""" The binary codes to signal which simulated data is being sent through mavlink
Attributes:
| ACCEL (int): mavlink binary code for the accelerometer (0b0000000000111 = 7)
| GYRO (int): mavlink binary code for the gyroscope (0b0000000111000 = 56)
| MAG (int): mavlink binary code for the magnetometer (0b0000111000000=448)
| BARO (int): mavlink binary code for the barometer (0b1101000000000=6656)
| DIFF_PRESS (int): mavlink binary code for the pressure sensor (0b0010000000000=1024)
"""
ACCEL: int = 7
GYRO: int = 56
MAG: int = 448
BARO: int = 6656
DIFF_PRESS: int = 1024
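# Worked example (illustrative, not part of the original code): the 'fields_updated' bitmask passed to
# hil_sensor_send further below is built by OR-ing these codes together, e.g.
#   SensorSource.ACCEL | SensorSource.GYRO                     -> 7 | 56   = 63
#   SensorSource.ACCEL | SensorSource.GYRO | SensorSource.MAG  -> 63 | 448 = 511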
class SensorMsg:
"""
An auxiliary data class where we write all the sensor data that is going to be sent through mavlink
"""
def __init__(self):
# IMU Data
self.new_imu_data: bool = False
self.received_first_imu: bool = False
self.xacc: float = 0.0
self.yacc: float = 0.0
self.zacc: float = 0.0
self.xgyro: float = 0.0
self.ygyro: float = 0.0
self.zgyro: float = 0.0
# Baro Data
self.new_bar_data: bool = False
self.abs_pressure: float = 0.0
self.pressure_alt: float = 0.0
self.temperature: float = 0.0
# Magnetometer Data
self.new_mag_data: bool = False
self.xmag: float = 0.0
self.ymag: float = 0.0
self.zmag: float = 0.0
# Airspeed Data
self.new_press_data: bool = False
self.diff_pressure: float = 0.0
# GPS Data
self.new_gps_data: bool = False
self.fix_type: int = 0
self.latitude_deg: float = -999
self.longitude_deg: float = -999
self.altitude: float = -999
self.eph: float = 1.0
self.epv: float = 1.0
self.velocity: float = 0.0
self.velocity_north: float = 0.0
self.velocity_east: float = 0.0
self.velocity_down: float = 0.0
self.cog: float = 0.0
self.satellites_visible: int = 0
# Vision Pose
self.new_vision_data: bool = False
self.vision_x: float = 0.0
self.vision_y: float = 0.0
self.vision_z: float = 0.0
self.vision_roll: float = 0.0
self.vision_pitch: float = 0.0
self.vision_yaw: float = 0.0
self.vision_covariance = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.vision_reset_counter: int = 0
# Simulation State
self.new_sim_state: bool = False
self.sim_attitude = [1.0, 0.0, 0.0, 0.0] # [w, x, y, z]
self.sim_acceleration = [0.0, 0.0, 0.0] # [x,y,z body acceleration]
self.sim_angular_vel = [0.0, 0.0, 0.0] # [roll-rate, pitch-rate, yaw-rate] rad/s
self.sim_lat = 0.0 # [deg]
self.sim_lon = 0.0 # [deg]
self.sim_alt = 0.0 # [m]
self.sim_ind_airspeed = 0.0 # Indicated air speed
self.sim_true_airspeed = 0.0 # True airspeed
self.sim_velocity_inertial = [0.0, 0.0, 0.0] # North-east-down [m/s]
class ThrusterControl:
"""
An auxiliary data class that saves the thrusters command data received via mavlink and
scales them into individual angular velocities expressed in rad/s to apply to each rotor
"""
def __init__(
self,
num_rotors: int = 4,
input_offset=[0, 0, 0, 0],
input_scaling=[0, 0, 0, 0],
zero_position_armed=[100, 100, 100, 100],
):
"""Initialize the ThrusterControl object
Args:
num_rotors (int): The number of rotors that the vehicle has. Defaults to 4.
input_offset (list): A list with the offsets to apply to the rotor values received via mavlink. Defaults to [0, 0, 0, 0].
input_scaling (list): A list with the scaling to apply to the rotor values received via mavlink. Defaults to [0, 0, 0, 0].
zero_position_armed (list): Another list of offsets to apply to the rotor values received via mavlink. Defaults to [100, 100, 100, 100].
"""
self.num_rotors: int = num_rotors
# Values to scale and offset the rotor control inputs received from PX4
assert len(input_offset) == self.num_rotors
self.input_offset = input_offset
assert len(input_scaling) == self.num_rotors
self.input_scaling = input_scaling
assert len(zero_position_armed) == self.num_rotors
self.zero_position_armed = zero_position_armed
# The actual speed references to apply to the vehicle rotor joints
self._input_reference = [0.0 for i in range(self.num_rotors)]
@property
def input_reference(self):
"""A list of floats with the angular velocities in rad/s
Returns:
list: A list of floats with the angular velocities to apply to each rotor, expressed in rad/s
"""
return self._input_reference
def update_input_reference(self, controls):
"""Takes a list with the thrust controls received via mavlink and scales them in order to generated
the equivalent angular velocities in rad/s
Args:
controls (list): A list of ints with thrust controls received via mavlink
"""
# Check if the number of controls received is correct
if len(controls) < self.num_rotors:
carb.log_warn("Did not receive enough inputs for all the rotors")
return
# Update the desired reference for every rotor (and saturate according to the min and max values)
for i in range(self.num_rotors):
# Compute the actual velocity reference to apply to each rotor
self._input_reference[i] = (controls[i] + self.input_offset[i]) * self.input_scaling[
i
] + self.zero_position_armed[i]
def zero_input_reference(self):
"""
When this method is called, the input_reference is updated such that every rotor is stopped
"""
self._input_reference = [0.0 for i in range(self.num_rotors)]
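# Worked example (illustrative, not part of the original code): with the defaults used by
# MavlinkBackendConfig below (input_offset=0.0, input_scaling=1000.0, zero_position_armed=100.0),
# a normalized control of 0.5 received from PX4 maps to (0.5 + 0.0) * 1000.0 + 100.0 = 600.0 rad/s,
# while a control of 0.0 maps to the armed idle speed of 100.0 rad/s.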
class MavlinkBackendConfig:
"""
An auxiliary data class used to store all the configurations for the mavlink communications.
"""
def __init__(self, config={}):
"""
Initialize the MavlinkBackendConfig class
Args:
config (dict): A Dictionary that contains all the parameters for configuring the Mavlink interface - it can be empty or only have some of the parameters used by this backend.
Examples:
The dictionary default parameters are
>>> {"vehicle_id": 0,
>>> "connection_type": "tcpin",
>>> "connection_ip": "localhost",
>>> "connection_baseport": 4560,
>>> "px4_autolaunch": True,
>>> "px4_dir": "PegasusInterface().px4_path",
>>> "px4_vehicle_model": "iris",
>>> "enable_lockstep": True,
>>> "num_rotors": 4,
>>> "input_offset": [0.0, 0.0, 0.0, 0.0],
>>> "input_scaling": [1000.0, 1000.0, 1000.0, 1000.0],
>>> "zero_position_armed": [100.0, 100.0, 100.0, 100.0],
>>> "update_rate": 250.0
>>> }
"""
# Configurations for the mavlink communication protocol (note: the vehicle id is added to the connection_baseport)
self.vehicle_id = config.get("vehicle_id", 0)
self.connection_type = config.get("connection_type", "tcpin")
self.connection_ip = config.get("connection_ip", "localhost")
self.connection_baseport = config.get("connection_baseport", 4560)
# Configure whether to launch px4 in the background automatically or not for every vehicle launched
self.px4_autolaunch: bool = config.get("px4_autolaunch", True)
self.px4_dir: str = config.get("px4_dir", PegasusInterface().px4_path)
self.px4_vehicle_model: str = config.get("px4_vehicle_model", "iris")
# Configurations to interpret the rotors control messages coming from mavlink
self.enable_lockstep: bool = config.get("enable_lockstep", True)
self.num_rotors: int = config.get("num_rotors", 4)
self.input_offset = config.get("input_offset", [0.0, 0.0, 0.0, 0.0])
self.input_scaling = config.get("input_scaling", [1000.0, 1000.0, 1000.0, 1000.0])
self.zero_position_armed = config.get("zero_position_armed", [100.0, 100.0, 100.0, 100.0])
# The update rate at which we will be sending data to mavlink (TODO - remove this from here in the future
# and infer directly from the function calls)
self.update_rate: float = config.get("update_rate", 250.0) # [Hz]
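# Illustrative sketch (not part of the original code): building a configuration for a second vehicle that
# listens on port 4561 (connection_baseport + vehicle_id) and expects PX4 to be launched manually. The
# function name and the chosen values are arbitrary and only meant to show the expected dictionary keys.
def example_mavlink_backend_config() -> MavlinkBackendConfig:
    """Return a MavlinkBackendConfig for vehicle 1 with the automatic PX4 launch disabled."""
    return MavlinkBackendConfig({
        "vehicle_id": 1,
        "connection_type": "tcpin",
        "connection_ip": "localhost",
        "connection_baseport": 4560,
        "px4_autolaunch": False,
        "num_rotors": 4,
        "update_rate": 250.0,
    })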
class MavlinkBackend(Backend):
""" The Mavlink Backend used to receive the vehicle's state and sensor data in order to send to PX4 through mavlink. It also
receives via mavlink the thruster commands to apply to each vehicle rotor.
"""
def __init__(self, config=MavlinkBackendConfig()):
"""Initialize the MavlinkBackend
Args:
config (MavlinkBackendConfig): The configuration class for the MavlinkBackend. Defaults to MavlinkBackendConfig().
"""
# Initialize the Backend object
super().__init__()
# Setup the desired mavlink connection port
# The connection will only be created once the simulation starts
self._vehicle_id = config.vehicle_id
self._connection = None
self._connection_port = (
config.connection_type
+ ":"
+ config.connection_ip
+ ":"
+ str(config.connection_baseport + config.vehicle_id)
)
# Check if we need to autolaunch px4 in the background or not
self.px4_autolaunch: bool = config.px4_autolaunch
self.px4_vehicle_model: str = config.px4_vehicle_model # only needed if px4_autolaunch == True
self.px4_tool: PX4LaunchTool = None
self.px4_dir: str = config.px4_dir
# Set the update rate used for sending the messages (TODO - remove this hardcoded value from here)
self._update_rate: float = config.update_rate
self._time_step: float = 1.0 / self._update_rate # s
self._is_running: bool = False
# Vehicle Sensor data to send through mavlink
self._sensor_data: SensorMsg = SensorMsg()
# Vehicle Rotor data received from mavlink
self._rotor_data: ThrusterControl = ThrusterControl(
config.num_rotors, config.input_offset, config.input_scaling, config.zero_position_armed
)
# Vehicle actuator control data
self._num_inputs: int = config.num_rotors
self._input_reference: np.ndarray = np.zeros((self._num_inputs,))
self._armed: bool = False
self._input_offset: np.ndarray = np.zeros((self._num_inputs,))
self._input_scaling: np.ndarray = np.zeros((self._num_inputs,))
# Select whether lockstep is enabled
self._enable_lockstep: bool = config.enable_lockstep
# Auxiliary variables to handle the lockstep between receiving sensor data and actuator control
self._received_first_actuator: bool = False
self._received_actuator: bool = False
# Auxiliary variables to check if we have already received a heartbeat from the software-in-the-loop simulation
self._received_first_hearbeat: bool = False
self._last_heartbeat_sent_time = 0
# Auxiliary variables for setting the u_time when sending sensor data to px4
self._current_utime: int = 0
def update_sensor(self, sensor_type: str, data):
"""Method that is used as callback for the vehicle for every iteration that a sensor produces new data.
Only the IMU, GPS, Barometer and Magnetometer sensor data are stored to be sent through mavlink. Every other
sensor data that gets passed to this function is discarded.
Args:
sensor_type (str): A name that describes the type of sensor
data (dict): A dictionary that contains the data produced by the sensor
"""
if sensor_type == "IMU":
self.update_imu_data(data)
elif sensor_type == "GPS":
self.update_gps_data(data)
elif sensor_type == "Vision":
self.update_vision_data(data)
elif sensor_type == "Barometer":
self.update_bar_data(data)
elif sensor_type == "Magnetometer":
self.update_mag_data(data)
# If the data received is not from one of the above sensors, then this backend does
# not support that sensor and it will just ignore it
else:
pass
def update_imu_data(self, data):
"""Gets called by the 'update_sensor' method to update the current IMU data
Args:
data (dict): The data produced by an IMU sensor
"""
# Accelerometer data
self._sensor_data.xacc = data["linear_acceleration"][0]
self._sensor_data.yacc = data["linear_acceleration"][1]
self._sensor_data.zacc = data["linear_acceleration"][2]
# Gyro data
self._sensor_data.xgyro = data["angular_velocity"][0]
self._sensor_data.ygyro = data["angular_velocity"][1]
self._sensor_data.zgyro = data["angular_velocity"][2]
# Signal that we have new IMU data
self._sensor_data.new_imu_data = True
self._sensor_data.received_first_imu = True
def update_gps_data(self, data):
"""Gets called by the 'update_sensor' method to update the current GPS data
Args:
data (dict): The data produced by an GPS sensor
"""
# GPS data
self._sensor_data.fix_type = int(data["fix_type"])
self._sensor_data.latitude_deg = int(data["latitude"] * 10000000)
self._sensor_data.longitude_deg = int(data["longitude"] * 10000000)
self._sensor_data.altitude = int(data["altitude"] * 1000)
self._sensor_data.eph = int(data["eph"])
self._sensor_data.epv = int(data["epv"])
self._sensor_data.velocity = int(data["speed"] * 100)
self._sensor_data.velocity_north = int(data["velocity_north"] * 100)
self._sensor_data.velocity_east = int(data["velocity_east"] * 100)
self._sensor_data.velocity_down = int(data["velocity_down"] * 100)
self._sensor_data.cog = int(data["cog"] * 100)
self._sensor_data.satellites_visible = int(data["sattelites_visible"])
# Signal that we have new GPS data
self._sensor_data.new_gps_data = True
# Also update the groundtruth for the latitude and longitude
self._sensor_data.sim_lat = int(data["latitude_gt"] * 10000000)
self._sensor_data.sim_lon = int(data["longitude_gt"] * 10000000)
self._sensor_data.sim_alt = int(data["altitude_gt"] * 1000)
def update_bar_data(self, data):
"""Gets called by the 'update_sensor' method to update the current Barometer data
Args:
data (dict): The data produced by an Barometer sensor
"""
# Barometer data
self._sensor_data.temperature = data["temperature"]
self._sensor_data.abs_pressure = data["absolute_pressure"]
self._sensor_data.pressure_alt = data["pressure_altitude"]
# Signal that we have new Barometer data
self._sensor_data.new_bar_data = True
def update_mag_data(self, data):
"""Gets called by the 'update_sensor' method to update the current Vision data
Args:
data (dict): The data produced by an Vision sensor
"""
# Magnetometer data
self._sensor_data.xmag = data["magnetic_field"][0]
self._sensor_data.ymag = data["magnetic_field"][1]
self._sensor_data.zmag = data["magnetic_field"][2]
# Signal that we have new Magnetometer data
self._sensor_data.new_mag_data = True
def update_vision_data(self, data):
"""Method that 'in the future' will get called by the 'update_sensor' method to update the current Vision data
This callback is currently not being called (TODO in a future simulator version)
Args:
data (dict): The data produced by a Vision sensor
"""
# Vision or MOCAP data
self._sensor_data.vision_x = data["x"]
self._sensor_data.vision_y = data["y"]
self._sensor_data.vision_z = data["z"]
self._sensor_data.vision_roll = data["roll"]
self._sensor_data.vision_pitch = data["pitch"]
self._sensor_data.vision_yaw = data["yaw"]
self._sensor_data.vision_covariance = data["covariance"]
self._sensor_data.vision_reset_counter = data["reset_counter"]
# Signal that we have new vision or mocap data
self._sensor_data.new_vision_data = True
def update_state(self, state: State):
"""Method that is used as callback and gets called at every physics step with the current state of the vehicle.
This state is then stored in order to be sent as groundtruth via mavlink
Args:
state (State): The current state of the vehicle.
"""
# Get the quaternion in the convention [x, y, z, w]
attitude = state.get_attitude_ned_frd()
# Rotate the quaternion to the mavlink standard
self._sensor_data.sim_attitude[0] = attitude[3]
self._sensor_data.sim_attitude[1] = attitude[0]
self._sensor_data.sim_attitude[2] = attitude[1]
self._sensor_data.sim_attitude[3] = attitude[2]
# Get the angular velocity
ang_vel = state.get_angular_velocity_frd()
self._sensor_data.sim_angular_vel[0] = ang_vel[0]
self._sensor_data.sim_angular_vel[1] = ang_vel[1]
self._sensor_data.sim_angular_vel[2] = ang_vel[2]
# Get the acceleration
acc_vel = state.get_linear_acceleration_ned()
self._sensor_data.sim_acceleration[0] = int(acc_vel[0] * 1000)
self._sensor_data.sim_acceleration[1] = int(acc_vel[1] * 1000)
self._sensor_data.sim_acceleration[2] = int(acc_vel[2] * 1000)
# Get the latitude, longitude and altitude directly from the GPS
# Get the linear velocity of the vehicle in the inertial frame
lin_vel = state.get_linear_velocity_ned()
self._sensor_data.sim_velocity_inertial[0] = int(lin_vel[0] * 100)
self._sensor_data.sim_velocity_inertial[1] = int(lin_vel[1] * 100)
self._sensor_data.sim_velocity_inertial[2] = int(lin_vel[2] * 100)
# Compute the air_speed - assumed indicated airspeed due to flow aligned with pitot (body x)
body_vel = state.get_linear_body_velocity_ned_frd()
self._sensor_data.sim_ind_airspeed = int(body_vel[0] * 100)
self._sensor_data.sim_true_airspeed = int(np.linalg.norm(lin_vel) * 100) # TODO - add wind here
self._sensor_data.new_sim_state = True
def input_reference(self):
"""Method that when implemented, should return a list of desired angular velocities to apply to the vehicle rotors
"""
return self._rotor_data.input_reference
def __del__(self):
"""Gets called when the MavlinkBackend object gets destroyed. When this happens, we make sure
to close any mavlink connection open for this vehicle.
"""
# When this object gets destroyed, close the mavlink connection to free the communication port
try:
self._connection.close()
self._connection = None
except:
carb.log_info("Mavlink connection was not closed, because it was never opened")
def start(self):
"""Method that handles the begining of the simulation of vehicle. It will try to open the mavlink connection
interface and also attemp to launch px4 in a background process if that option as specified in the config class
"""
# If we are already running the mavlink interface, then ignore the function call
if self._is_running == True:
return
# If the connection no longer exists (we stopped and re-started the stream), then re-initialize the interface
if self._connection is None:
self.re_initialize_interface()
# Set the flag to signal that the mavlink transmission has started
self._is_running = True
# Launch the PX4 in the background if needed
if self.px4_autolaunch and self.px4_tool is None:
carb.log_info("Attempting to launch PX4 in background process")
self.px4_tool = PX4LaunchTool(self.px4_dir, self._vehicle_id, self.px4_vehicle_model)
self.px4_tool.launch_px4()
def stop(self):
"""Method that when called will handle the stopping of the simulation of vehicle. It will make sure that any open
mavlink connection will be closed and also that the PX4 background process gets killed (if it was auto-initialized)
"""
# If the simulation was already stopped, then ignore the function call
if self._is_running == False:
return
# Set the flag so that we are no longer running the mavlink interface
self._is_running = False
# Close the mavlink connection
self._connection.close()
self._connection = None
# Close the PX4 if it was running
if self.px4_autolaunch and self.px4_tool is not None:
carb.log_info("Attempting to kill PX4 background process")
self.px4_tool.kill_px4()
self.px4_tool = None
def reset(self):
"""For now does nothing. Here for compatibility purposes only
"""
return
def re_initialize_interface(self):
"""Auxiliar method used to get the MavlinkInterface to reset the MavlinkInterface to its initial state
"""
self._is_running = False
# Restart the sensor data
self._sensor_data = SensorMsg()
# Restart the connection
self._connection = mavutil.mavlink_connection(self._connection_port)
# Auxiliary variables to handle the lockstep between receiving sensor data and actuator control
self._received_first_actuator: bool = False
self._received_actuator: bool = False
# Auxiliary variables to check if we have already received a heartbeat from the software-in-the-loop simulation
self._received_first_hearbeat: bool = False
self._last_heartbeat_sent_time = 0
def wait_for_first_hearbeat(self):
"""
Responsible for checking whether the first heartbeat was received. This method polls the mavlink connection in a
non-blocking way and, once a heartbeat arrives, sets the corresponding flag so that the update loop can start
polling for mavlink messages
"""
carb.log_warn("Waiting for first heartbeat")
result = self._connection.wait_heartbeat(blocking=False)
if result is not None:
self._received_first_hearbeat = True
carb.log_warn("Received first hearbeat")
def update(self, dt):
"""
Method that is called at every physics step to send data to px4 and receive the control inputs via mavlink
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
# Check for the first heartbeat on the first few iterations
if not self._received_first_hearbeat:
self.wait_for_first_hearbeat()
return
# If we have already received IMU data in the past, only proceed once new IMU data is available
if self._sensor_data.received_first_imu:
if not self._sensor_data.new_imu_data and self._is_running:
# Just go for the next update and then try to check if we have new simulated sensor data
# Do not continue and get mavlink thruster commands until we have simulated IMU data available
return
# Check if we have received any mavlink messages
self.poll_mavlink_messages()
# Send hearbeats at 1Hz
if (time.time() - self._last_heartbeat_sent_time) > 1.0 or self._received_first_hearbeat == False:
self.send_heartbeat()
self._last_heartbeat_sent_time = time.time()
# Update the current u_time for px4
self._current_utime += int(dt * 1000000)
# Send sensor messages
self.send_sensor_msgs(self._current_utime)
# Send the GPS messages
self.send_gps_msgs(self._current_utime)
# Send the Vision messages
self.send_vision_msgs(self._current_utime)
def poll_mavlink_messages(self):
"""
Method that is used to check if new mavlink messages were received
"""
# If we have not received the first heartbeat yet, do not poll for mavlink messages
if self._received_first_hearbeat == False:
return
# Check if we need to lock and wait for actuator control data
needs_to_wait_for_actuator: bool = self._received_first_actuator and self._enable_lockstep
# Start by assuming that we have not received data for the actuators for the current step
self._received_actuator = False
# Use this loop to emulate a do-while loop (make sure this runs at least once)
while True:
# Try to get a message
msg = self._connection.recv_match(blocking=needs_to_wait_for_actuator)
# If a message was received
if msg is not None:
# Check if it is of the type that contains actuator controls
if msg.id == mavutil.mavlink.MAVLINK_MSG_ID_HIL_ACTUATOR_CONTROLS:
self._received_first_actuator = True
self._received_actuator = True
# Handle the control of the actuation commands received by PX4
self.handle_control(msg.time_usec, msg.controls, msg.mode, msg.flags)
# Check if we do not need to wait for an actuator message or we just received actuator input
# If so, break out of the infinite loop
if not needs_to_wait_for_actuator or self._received_actuator:
break
def send_heartbeat(self, mav_type=mavutil.mavlink.MAV_TYPE_GENERIC):
"""
Method that is used to publish a heartbeat through the mavlink protocol
Args:
mav_type (int): The ID that indicates the type of vehicle. Defaults to MAV_TYPE_GENERIC=0
"""
carb.log_info("Sending heartbeat")
# Note: to know more about these functions, go to pymavlink->dialects->v20->standard.py
# This contains the definitions for sending the heartbeat and simulated sensor messages
self._connection.mav.heartbeat_send(mav_type, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, 0)
def send_sensor_msgs(self, time_usec: int):
"""
Method that when invoked, will send the simulated sensor data through mavlink
Args:
time_usec (int): The total time elapsed since the simulation started
"""
carb.log_info("Sending sensor msgs")
# Check which sensors have new data to send
fields_updated: int = 0
if self._sensor_data.new_imu_data:
# Set the bit field to signal that we are sending updated accelerometer and gyro data
fields_updated = fields_updated | SensorSource.ACCEL | SensorSource.GYRO
self._sensor_data.new_imu_data = False
if self._sensor_data.new_mag_data:
# Set the bit field to signal that we are sending updated magnetometer data
fields_updated = fields_updated | SensorSource.MAG
self._sensor_data.new_mag_data = False
if self._sensor_data.new_bar_data:
# Set the bit field to signal that we are sending updated barometer data
fields_updated = fields_updated | SensorSource.BARO
self._sensor_data.new_bar_data = False
if self._sensor_data.new_press_data:
# Set the bit field to signal that we are sending updated diff pressure data
fields_updated = fields_updated | SensorSource.DIFF_PRESS
self._sensor_data.new_press_data = False
try:
self._connection.mav.hil_sensor_send(
time_usec,
self._sensor_data.xacc,
self._sensor_data.yacc,
self._sensor_data.zacc,
self._sensor_data.xgyro,
self._sensor_data.ygyro,
self._sensor_data.zgyro,
self._sensor_data.xmag,
self._sensor_data.ymag,
self._sensor_data.zmag,
self._sensor_data.abs_pressure,
self._sensor_data.diff_pressure,
self._sensor_data.pressure_alt,
self._sensor_data.altitude,
fields_updated,
)
except:
carb.log_warn("Could not send sensor data through mavlink")
def send_gps_msgs(self, time_usec: int):
"""
Method that is used to send simulated GPS data through the mavlink protocol.
Args:
time_usec (int): The total time elapsed since the simulation started
"""
carb.log_info("Sending GPS msgs")
# Do not send GPS data, if no new data was received
if not self._sensor_data.new_gps_data:
return
self._sensor_data.new_gps_data = False
# Latitude, longitude and altitude (all in integers)
try:
self._connection.mav.hil_gps_send(
time_usec,
self._sensor_data.fix_type,
self._sensor_data.latitude_deg,
self._sensor_data.longitude_deg,
self._sensor_data.altitude,
self._sensor_data.eph,
self._sensor_data.epv,
self._sensor_data.velocity,
self._sensor_data.velocity_north,
self._sensor_data.velocity_east,
self._sensor_data.velocity_down,
self._sensor_data.cog,
self._sensor_data.satellites_visible,
)
except:
carb.log_warn("Could not send gps data through mavlink")
def send_vision_msgs(self, time_usec: int):
"""
Method that is used to send simulated vision/mocap data through the mavlink protocol.
Args:
time_usec (int): The total time elapsed since the simulation started
"""
carb.log_info("Sending vision/mocap msgs")
# Do not send vision/mocap data if no new data was received
if not self._sensor_data.new_vision_data:
return
self._sensor_data.new_vision_data = False
try:
self._connection.mav.vision_position_estimate_send(
time_usec,
self._sensor_data.vision_x,
self._sensor_data.vision_y,
self._sensor_data.vision_z,
self._sensor_data.vision_roll,
self._sensor_data.vision_pitch,
self._sensor_data.vision_yaw,
self._sensor_data.vision_covariance,
self._sensor_data.vision_reset_counter,
)
except:
carb.log_warn("Could not send vision/mocap data through mavlink")
def send_ground_truth(self, time_usec: int):
"""
Method that is used to send the groundtruth data of the vehicle through mavlink
Args:
time_usec (int): The total time elapsed since the simulation started
"""
carb.log_info("Sending groundtruth msgs")
# Do not send the groundtruth data if no new data was received
if not self._sensor_data.new_sim_state or self._sensor_data.sim_alt == 0:
return
self._sensor_data.new_sim_state = False
try:
self._connection.mav.hil_state_quaternion_send(
time_usec,
self._sensor_data.sim_attitude,
self._sensor_data.sim_angular_vel[0],
self._sensor_data.sim_angular_vel[1],
self._sensor_data.sim_angular_vel[2],
self._sensor_data.sim_lat,
self._sensor_data.sim_lon,
self._sensor_data.sim_alt,
self._sensor_data.sim_velocity_inertial[0],
self._sensor_data.sim_velocity_inertial[1],
self._sensor_data.sim_velocity_inertial[2],
self._sensor_data.sim_ind_airspeed,
self._sensor_data.sim_true_airspeed,
self._sensor_data.sim_acceleration[0],
self._sensor_data.sim_acceleration[1],
self._sensor_data.sim_acceleration[2],
)
except:
carb.log_warn("Could not send groundtruth through mavlink")
def handle_control(self, time_usec, controls, mode, flags):
"""
Method that, when a control message is received, computes the angular velocity references that should be applied
to each rotor of the vehicle
Args:
time_usec (int): The total time elapsed since the simulation started - Ignored argument
controls (list): A list with the thrust controls received via mavlink
mode (int): The flight-mode flags received via mavlink (used to check whether the vehicle is armed)
flags: Ignored argument
"""
# Check if the vehicle is armed - Note: here we have to add a +1 since the code for armed is 128, but
# pymavlink is returning 129 (the end of the buffer)
if mode == mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED + 1:
carb.log_info("Parsing control input")
# Set the rotor target speeds
self._rotor_data.update_input_reference(controls)
# If the vehicle is not armed, do not rotate the propellers
else:
self._rotor_data.zero_input_reference()
| 34,705 | Python | 39.878681 | 186 | 0.61677 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/backends/tools/px4_launch_tool.py | """
| File: px4_launch_tool.py
| Author: Marcelo Jacinto ([email protected])
| Description: Defines an auxiliary tool to launch the PX4 process in the background
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
# System tools used to launch the px4 process in the brackground
import os
import tempfile
import subprocess
class PX4LaunchTool:
"""
A class that manages the start/stop of a px4 process. It requires only the path to the PX4 installation (assuming that
PX4 was already built with 'make px4_sitl_default none'), the vehicle id and the vehicle model.
"""
def __init__(self, px4_dir, vehicle_id: int = 0, px4_model: str = "iris"):
"""Construct the PX4LaunchTool object
Args:
px4_dir (str): A string with the path to the PX4-Autopilot directory
vehicle_id (int): The ID of the vehicle. Defaults to 0.
px4_model (str): The vehicle model. Defaults to "iris".
"""
# Attribute that will hold the px4 process once it is running
self.px4_process = None
# The vehicle id (used for the mavlink port open in the system)
self.vehicle_id = vehicle_id
# Configurations on whether to autostart px4 (SITL) automatically or have the user launch it manually in another
# terminal
self.px4_dir = px4_dir
self.rc_script = self.px4_dir + "/ROMFS/px4fmu_common/init.d-posix/rcS"
# Create a temporary filesystem for px4 to write data to/from (and modify the original rcS files)
self.root_fs = tempfile.TemporaryDirectory()
# Set the environment variables that let PX4 know which vehicle model to use internally
self.environment = os.environ
self.environment["PX4_SIM_MODEL"] = px4_model
def launch_px4(self):
"""
Method that will launch a px4 instance with the specified configuration
"""
self.px4_process = subprocess.Popen(
[
self.px4_dir + "/build/px4_sitl_default/bin/px4",
self.px4_dir + "/ROMFS/px4fmu_common/",
"-s",
self.rc_script,
"-i",
str(self.vehicle_id),
"-d",
],
cwd=self.root_fs.name,
shell=False,
env=self.environment,
)
def kill_px4(self):
"""
Method that will kill a px4 instance with the specified configuration
"""
if self.px4_process is not None:
self.px4_process.kill()
self.px4_process = None
def __del__(self):
"""
If the px4 process is still running when the PX4 launch tool object is wiped from memory, then make sure
we kill the px4 instance so we don't end up with hanging px4 instances
"""
# Make sure the PX4 process gets killed
if self.px4_process:
self.kill_px4()
# Make sure we clean the temporary filesystem used for the simulation
self.root_fs.cleanup()
# ---- Code used for debugging the px4 tool ----
def main():
px4_tool = PX4LaunchTool(os.environ["HOME"] + "/PX4-Autopilot")
px4_tool.launch_px4()
import time
time.sleep(60)
if __name__ == "__main__":
main()
| 3,317 | Python | 31.851485 | 122 | 0.611396 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/vehicle.py | """
| File: vehicle.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Vehicle class which is used as the base for all the vehicles.
"""
# Numerical computations
import numpy as np
from scipy.spatial.transform import Rotation
# Low level APIs
import carb
from pxr import Usd, Gf
# High level Isaac sim APIs
import omni.usd
from omni.isaac.core.world import World
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.usd import get_stage_next_free_path
from omni.isaac.core.robots.robot import Robot
# Extension APIs
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
from pegasus.simulator.logic.vehicle_manager import VehicleManager
def get_world_transform_xform(prim: Usd.Prim):
"""
Get the rotation of a prim's world transform using omni.usd.get_world_transform_matrix().
See https://docs.omniverse.nvidia.com/kit/docs/omni.usd/latest/omni.usd/omni.usd.get_world_transform_matrix.html
Args:
prim (Usd.Prim): The prim to calculate the world transformation.
Returns:
Gf.Rotation: The rotation component of the prim's world transform.
"""
world_transform: Gf.Matrix4d = omni.usd.get_world_transform_matrix(prim)
rotation: Gf.Rotation = world_transform.ExtractRotation()
return rotation
class Vehicle(Robot):
def __init__(
self,
stage_prefix: str,
usd_path: str = None,
init_pos=[0.0, 0.0, 0.0],
init_orientation=[0.0, 0.0, 0.0, 1.0],
):
"""
Class that initializes a vehicle in Isaac Sim's current stage
Args:
stage_prefix (str): The name the vehicle will present in the simulator when spawned. Defaults to "quadrotor".
usd_path (str): The USD file that describes the looks and shape of the vehicle. Defaults to "".
init_pos (list): The initial position of the vehicle in the inertial frame (in ENU convention). Defaults to [0.0, 0.0, 0.0].
init_orientation (list): The initial orientation of the vehicle in quaternion [qx, qy, qz, qw]. Defaults to [0.0, 0.0, 0.0, 1.0].
"""
# Get the current world at which we want to spawn the vehicle
self._world = PegasusInterface().world
self._current_stage = self._world.stage
# Save the name with which the vehicle will appear in the stage
# and the name of the .usd file that contains its description
self._stage_prefix = get_stage_next_free_path(self._current_stage, stage_prefix, False)
self._usd_file = usd_path
# Get the vehicle name by taking the last part of vehicle stage prefix
self._vehicle_name = self._stage_prefix.rpartition("/")[-1]
# Spawn the vehicle primitive in the world's stage
self._prim = define_prim(self._stage_prefix, "Xform")
self._prim = get_prim_at_path(self._stage_prefix)
self._prim.GetReferences().AddReference(self._usd_file)
# Initialize the "Robot" class
# Note: we need to change the rotation to have qw first, because NVidia
# does not keep a standard of quaternions inside its own libraries (not good, but okay)
super().__init__(
prim_path=self._stage_prefix,
name=self._stage_prefix,
position=init_pos,
orientation=[init_orientation[3], init_orientation[0], init_orientation[1], init_orientation[2]],
articulation_controller=None,
)
# Add this object for the world to track, so that if we clear the world, this object is deleted from memory and
# as a consequence, from the VehicleManager as well
self._world.scene.add(self)
# Add the current vehicle to the vehicle manager, so that it knows
# that a vehicle was instantiated
VehicleManager.get_vehicle_manager().add_vehicle(self._stage_prefix, self)
# Variable that will hold the current state of the vehicle
self._state = State()
# Motor that is given as reference
self._motor_speed = []
# Add a callback to the physics engine to update the current state of the system
self._world.add_physics_callback(self._stage_prefix + "/state", self.update_state)
# Add the update method to the physics callback if the world was received
# so that we can apply forces and torques to the vehicle. Note, this method should
# be implemented in classes that inherit the vehicle object
self._world.add_physics_callback(self._stage_prefix + "/update", self.update)
# Set the flag that signals if the simulation is running or not
self._sim_running = False
# Add a callback to start/stop of the simulation once the play/stop button is hit
self._world.add_timeline_callback(self._stage_prefix + "/start_stop_sim", self.sim_start_stop)
def __del__(self):
"""
Method that is invoked when a vehicle object gets destroyed. When this happens, we also invoke the
'remove_vehicle' from the VehicleManager in order to remove the vehicle from the list of active vehicles.
"""
# Remove this object from the vehicleHandler
VehicleManager.get_vehicle_manager().remove_vehicle(self._stage_prefix)
"""
Properties
"""
@property
def state(self):
"""The state of the vehicle.
Returns:
State: The current state of the vehicle, i.e., position, orientation, linear and angular velocities...
"""
return self._state
@property
def vehicle_name(self) -> str:
"""Vehicle name.
Returns:
Vehicle name (str): last prim name in vehicle prim path
"""
return self._vehicle_name
"""
Operations
"""
def sim_start_stop(self, event):
"""
Callback that is called every time there is a timeline event such as starting/stopping the simulation.
Args:
event: A timeline event generated from Isaac Sim, such as starting or stopping the simulation.
"""
# If the start/stop button was pressed, then call the start and stop methods accordingly
if self._world.is_playing() and self._sim_running == False:
self._sim_running = True
self.start()
if self._world.is_stopped() and self._sim_running == True:
self._sim_running = False
self.stop()
def apply_force(self, force, pos=[0.0, 0.0, 0.0], body_part="/body"):
"""
Method that will apply a force on the rigidbody, on the part specified in the 'body_part' at its relative position
given by 'pos' (following a FLU) convention.
Args:
force (list): A 3-dimensional vector of floats with the force [Fx, Fy, Fz] on the body axis of the vehicle according to a FLU convention.
pos (list): The relative position, in the body frame (FLU convention), at which the force is applied. Defaults to [0.0, 0.0, 0.0].
body_part (str): The name of the rigid body sub-prim to which the force is applied. Defaults to "/body".
"""
# Get the handle of the rigidbody that we will apply the force to
rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + body_part)
# Apply the force to the rigidbody. The force should be expressed in the rigidbody frame
self._world.dc_interface.apply_body_force(rb, carb._carb.Float3(force), carb._carb.Float3(pos), False)
def apply_torque(self, torque, body_part="/body"):
"""
Method that when invoked applies a given torque vector to /<rigid_body_name>/"body" or to /<rigid_body_name>/<body_part>.
Args:
torque (list): A 3-dimensional vector of floats with the force [Tx, Ty, Tz] on the body axis of the vehicle according to a FLU convention.
body_part (str): The name of the rigid body sub-prim to which the torque is applied. Defaults to "/body".
"""
# Get the handle of the rigidbody that we will apply a torque to
rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + body_part)
# Apply the torque to the rigidbody. The torque should be expressed in the rigidbody frame
self._world.dc_interface.apply_body_torque(rb, carb._carb.Float3(torque), False)
def update_state(self, dt: float):
"""
Method that is called at every physics step to retrieve and update the current state of the vehicle, i.e., get
the current position, orientation, linear and angular velocities and acceleration of the vehicle.
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
# Get the body frame interface of the vehicle (this will be the frame used to get the position, orientation, etc.)
body = self._world.dc_interface.get_rigid_body(self._stage_prefix + "/body")
# Get the current position and orientation in the inertial frame
pose = self._world.dc_interface.get_rigid_body_pose(body)
# Get the attitude according to the convention [w, x, y, z]
prim = self._world.stage.GetPrimAtPath(self._stage_prefix + "/body")
rotation_quat = get_world_transform_xform(prim).GetQuaternion()
rotation_quat_real = rotation_quat.GetReal()
rotation_quat_img = rotation_quat.GetImaginary()
# Get the angular velocity of the vehicle expressed in the body frame of reference
ang_vel = self._world.dc_interface.get_rigid_body_angular_velocity(body)
# The linear velocity [x_dot, y_dot, z_dot] of the vehicle's body frame expressed in the inertial frame of reference
linear_vel = self._world.dc_interface.get_rigid_body_linear_velocity(body)
# Get the linear acceleration of the body relative to the inertial frame, expressed in the inertial frame
# Note: we must do this approximation, since the Isaac sim does not output the acceleration of the rigid body directly
linear_acceleration = (np.array(linear_vel) - self._state.linear_velocity) / dt
# Update the state variable X = [x,y,z]
self._state.position = np.array(pose.p)
# Get the quaternion according in the [qx,qy,qz,qw] standard
self._state.attitude = np.array(
[rotation_quat_img[0], rotation_quat_img[1], rotation_quat_img[2], rotation_quat_real]
)
# Express the velocity of the vehicle in the inertial frame X_dot = [x_dot, y_dot, z_dot]
self._state.linear_velocity = np.array(linear_vel)
# The linear velocity V =[u,v,w] of the vehicle's body frame expressed in the body frame of reference
# Note that: x_dot = Rot * V
self._state.linear_body_velocity = (
Rotation.from_quat(self._state.attitude).inv().apply(self._state.linear_velocity)
)
# omega = [p,q,r]
self._state.angular_velocity = Rotation.from_quat(self._state.attitude).inv().apply(np.array(ang_vel))
# The acceleration of the vehicle expressed in the inertial frame X_ddot = [x_ddot, y_ddot, z_ddot]
self._state.linear_acceleration = linear_acceleration
def start(self):
"""
Method that should be implemented by the class that inherits the vehicle object.
"""
pass
def stop(self):
"""
Method that should be implemented by the class that inherits the vehicle object.
"""
pass
def update(self, dt: float):
"""
Method that computes and applies the forces to the vehicle in
simulation based on the motor speed. This method must be implemented
by a class that inherits this type and it's called periodically by the physics engine.
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
pass
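# Illustrative sketch (not part of the original code): a minimal Vehicle subclass that applies a constant
# upward force to the body on every physics step. The class name, stage prefix and the 10.0 N force are
# arbitrary values chosen only to show how 'update' and 'apply_force' fit together.
class ConstantThrustVehicle(Vehicle):
    """Example vehicle that pushes itself up with a fixed force expressed in the body frame (FLU)."""

    def __init__(self, stage_prefix: str = "example_vehicle", usd_path: str = None, force_z: float = 10.0):
        super().__init__(stage_prefix, usd_path)
        self._force_z = force_z

    def update(self, dt: float):
        # Apply the constant force at the body origin on every physics step
        self.apply_force([0.0, 0.0, self._force_z], pos=[0.0, 0.0, 0.0], body_part="/body")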
| 11,970 | Python | 41.601423 | 150 | 0.653133 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/multirotor.py | """
| File: multirotor.py
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Definition of the Multirotor class which is used as the base for all the multirotor vehicles.
"""
import numpy as np
# The vehicle interface
from pegasus.simulator.logic.vehicles.vehicle import Vehicle
# Mavlink interface
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend
# Sensors and dynamics setup
from pegasus.simulator.logic.dynamics import LinearDrag
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
from pegasus.simulator.logic.sensors import Barometer, IMU, Magnetometer, GPS
from pegasus.simulator.logic.interface.pegasus_interface import PegasusInterface
class MultirotorConfig:
"""
A data class that is used for configuring a Multirotor
"""
def __init__(self):
"""
Initialization of the MultirotorConfig class
"""
# Stage prefix of the vehicle when spawning in the world
self.stage_prefix = "quadrotor"
# The USD file that describes the visual aspect of the vehicle (and some properties such as mass and moments of inertia)
self.usd_file = ""
# The default thrust curve for a quadrotor and dynamics relating to drag
self.thrust_curve = QuadraticThrustCurve()
self.drag = LinearDrag([0.50, 0.30, 0.0])
# The default sensors for a quadrotor
self.sensors = [Barometer(), IMU(), Magnetometer(), GPS()]
# The default graphs
self.graphs = []
# The backends for actually sending commands to the vehicle. By default use mavlink (with default mavlink configurations)
        # [Can be None as well, if we do not desire to use PX4 with this simulated vehicle]. It can also be a ROS2 backend
# or your own custom Backend implementation!
self.backends = [MavlinkBackend()]
class Multirotor(Vehicle):
"""Multirotor class - It defines a base interface for creating a multirotor
"""
def __init__(
self,
# Simulation specific configurations
stage_prefix: str = "quadrotor",
usd_file: str = "",
vehicle_id: int = 0,
# Spawning pose of the vehicle
init_pos=[0.0, 0.0, 0.07],
init_orientation=[0.0, 0.0, 0.0, 1.0],
config=MultirotorConfig(),
):
"""Initializes the multirotor object
Args:
stage_prefix (str): The name the vehicle will present in the simulator when spawned. Defaults to "quadrotor".
usd_file (str): The USD file that describes the looks and shape of the vehicle. Defaults to "".
vehicle_id (int): The id to be used for the vehicle. Defaults to 0.
init_pos (list): The initial position of the vehicle in the inertial frame (in ENU convention). Defaults to [0.0, 0.0, 0.07].
init_orientation (list): The initial orientation of the vehicle in quaternion [qx, qy, qz, qw]. Defaults to [0.0, 0.0, 0.0, 1.0].
            config (MultirotorConfig, optional): The configuration holding the vehicle's sensors, thrust curve, drag model and backends. Defaults to MultirotorConfig().
"""
# 1. Initiate the Vehicle object itself
super().__init__(stage_prefix, usd_file, init_pos, init_orientation)
# 2. Initialize all the vehicle sensors
self._sensors = config.sensors
for sensor in self._sensors:
if sensor.sensor_type in ["Camera", "Lidar"]:
sensor.initialize(self)
else:
sensor.initialize(PegasusInterface().latitude, PegasusInterface().longitude, PegasusInterface().altitude)
# Add callbacks to the physics engine to update each sensor at every timestep
# and let the sensor decide depending on its internal update rate whether to generate new data
self._world.add_physics_callback(self._stage_prefix + "/Sensors", self.update_sensors)
# 3. Initialize all the vehicle graphs
self._graphs = config.graphs
for graph in self._graphs:
graph.initialize(self)
# 4. Setup the dynamics of the system
# Get the thrust curve of the vehicle from the configuration
self._thrusters = config.thrust_curve
self._drag = config.drag
# 5. Save the backend interface (if given in the configuration of the multirotor)
# and initialize them
self._backends = config.backends
for backend in self._backends:
backend.initialize(self)
        # Add a callback that sends the simulated vehicle state to the backends at every physics step
self._world.add_physics_callback(self._stage_prefix + "/mav_state", self.update_sim_state)
def update_sensors(self, dt: float):
"""Callback that is called at every physics steps and will call the sensor.update method to generate new
sensor data. For each data that the sensor generates, the backend.update_sensor method will also be called for
every backend. For example, if new data is generated for an IMU and we have a MavlinkBackend, then the update_sensor
        method will be called for that backend so that this data can later be sent through MAVLink.
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
# Call the update method for the sensor to update its values internally (if applicable)
for sensor in self._sensors:
sensor_data = sensor.update(self._state, dt)
# If some data was updated and we have a mavlink backend or ros backend (or other), then just update it
if sensor_data is not None:
for backend in self._backends:
backend.update_sensor(sensor.sensor_type, sensor_data)
def update_sim_state(self, dt: float):
"""
Callback that is used to "send" the current state for each backend being used to control the vehicle. This callback
is called on every physics step.
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
for backend in self._backends:
backend.update_state(self._state)
def start(self):
"""
        Initializes the communication with all the backends. This method is invoked automatically when the simulation starts
"""
for backend in self._backends:
backend.start()
def stop(self):
"""
        Signals all the backends that the simulation has stopped. This method is invoked automatically when the simulation stops
"""
for backend in self._backends:
backend.stop()
def update(self, dt: float):
"""
Method that computes and applies the forces to the vehicle in simulation based on the motor speed.
This method must be implemented by a class that inherits this type. This callback
is called on every physics step.
Args:
dt (float): The time elapsed between the previous and current function calls (s).
"""
# Get the articulation root of the vehicle
articulation = self._world.dc_interface.get_articulation(self._stage_prefix)
# Get the desired angular velocities for each rotor from the first backend (can be mavlink or other) expressed in rad/s
if len(self._backends) != 0:
desired_rotor_velocities = self._backends[0].input_reference()
else:
desired_rotor_velocities = [0.0 for i in range(self._thrusters._num_rotors)]
# Input the desired rotor velocities in the thruster model
self._thrusters.set_input_reference(desired_rotor_velocities)
# Get the desired forces to apply to the vehicle
forces_z, _, rolling_moment = self._thrusters.update(self._state, dt)
# Apply force to each rotor
for i in range(4):
# Apply the force in Z on the rotor frame
self.apply_force([0.0, 0.0, forces_z[i]], body_part="/rotor" + str(i))
# Generate the rotating propeller visual effect
self.handle_propeller_visual(i, forces_z[i], articulation)
# Apply the torque to the body frame of the vehicle that corresponds to the rolling moment
self.apply_torque([0.0, 0.0, rolling_moment], "/body")
# Compute the total linear drag force to apply to the vehicle's body frame
drag = self._drag.update(self._state, dt)
self.apply_force(drag, body_part="/body")
# Call the update methods in all backends
for backend in self._backends:
backend.update(dt)
def handle_propeller_visual(self, rotor_number, force: float, articulation):
"""
        Auxiliary method used to set the joint velocity of each rotor (for animation purposes) based on the
amount of force being applied on each joint
Args:
rotor_number (int): The number of the rotor to generate the rotation animation
force (float): The force that is being applied on that rotor
articulation (_type_): The articulation group the joints of the rotors belong to
"""
# Rotate the joint to yield the visual of a rotor spinning (for animation purposes only)
joint = self._world.dc_interface.find_articulation_dof(articulation, "joint" + str(rotor_number))
# Spinning when armed but not applying force
if 0.0 < force < 0.1:
self._world.dc_interface.set_dof_velocity(joint, 5 * self._thrusters.rot_dir[rotor_number])
# Spinning when armed and applying force
elif 0.1 <= force:
self._world.dc_interface.set_dof_velocity(joint, 100 * self._thrusters.rot_dir[rotor_number])
# Not spinning
else:
self._world.dc_interface.set_dof_velocity(joint, 0)
def force_and_torques_to_velocities(self, force: float, torque: np.ndarray):
"""
        Auxiliary method used to get the target angular velocities for each rotor, given the total desired thrust [N] and
torque [Nm] to be applied in the multirotor's body frame.
Note: This method assumes a quadratic thrust curve. This method will be improved in a future update,
and a general thrust allocation scheme will be adopted. For now, it is made to work with multirotors directly.
Args:
force (np.ndarray): A vector of the force to be applied in the body frame of the vehicle [N]
torque (np.ndarray): A vector of the torque to be applied in the body frame of the vehicle [Nm]
Returns:
            list: A list of angular velocities [rad/s] to apply to each rotor to accomplish such forces and torques
"""
# Get the body frame of the vehicle
rb = self._world.dc_interface.get_rigid_body(self._stage_prefix + "/body")
# Get the rotors of the vehicle
rotors = [self._world.dc_interface.get_rigid_body(self._stage_prefix + "/rotor" + str(i)) for i in range(self._thrusters._num_rotors)]
# Get the relative position of the rotors with respect to the body frame of the vehicle (ignoring the orientation for now)
relative_poses = self._world.dc_interface.get_relative_body_poses(rb, rotors)
        # Define the allocation matrix
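        # The allocation model used below maps the squared rotor speeds to the total thrust and body torques:
        #   [T, tau_x, tau_y, tau_z]^T = aloc_matrix @ omega_squared
        # so the target squared speeds are later recovered with the pseudo-inverse:
        #   omega_squared = pinv(aloc_matrix) @ [T, tau_x, tau_y, tau_z]^T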
aloc_matrix = np.zeros((4, self._thrusters._num_rotors))
# Define the first line of the matrix (T [N])
aloc_matrix[0, :] = np.array(self._thrusters._rotor_constant)
# Define the second and third lines of the matrix (\tau_x [Nm] and \tau_y [Nm])
aloc_matrix[1, :] = np.array([relative_poses[i].p[1] * self._thrusters._rotor_constant[i] for i in range(self._thrusters._num_rotors)])
aloc_matrix[2, :] = np.array([-relative_poses[i].p[0] * self._thrusters._rotor_constant[i] for i in range(self._thrusters._num_rotors)])
        # Define the fourth line of the matrix (\tau_z [Nm])
aloc_matrix[3, :] = np.array([self._thrusters._rolling_moment_coefficient[i] * self._thrusters._rot_dir[i] for i in range(self._thrusters._num_rotors)])
# Compute the inverse allocation matrix, so that we can get the angular velocities (squared) from the total thrust and torques
aloc_inv = np.linalg.pinv(aloc_matrix)
# Compute the target angular velocities (squared)
squared_ang_vel = aloc_inv @ np.array([force, torque[0], torque[1], torque[2]])
# Making sure that there is no negative value on the target squared angular velocities
squared_ang_vel[squared_ang_vel < 0] = 0.0
# ------------------------------------------------------------------------------------------------
# Saturate the inputs while preserving their relation to each other, by performing a normalization
# ------------------------------------------------------------------------------------------------
max_thrust_vel_squared = np.power(self._thrusters.max_rotor_velocity[0], 2)
max_val = np.max(squared_ang_vel)
if max_val >= max_thrust_vel_squared:
normalize = np.maximum(max_val / max_thrust_vel_squared, 1.0)
squared_ang_vel = squared_ang_vel / normalize
# Compute the angular velocities for each rotor in [rad/s]
ang_vel = np.sqrt(squared_ang_vel)
return ang_vel
| 13,372 | Python | 45.434028 | 160 | 0.642911 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .vehicle import Vehicle
from .multirotor import Multirotor, MultirotorConfig
| 237 | Python | 28.749996 | 82 | 0.767932 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/vehicles/multirotors/iris.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from pegasus.simulator.logic.vehicles.multirotor import Multirotor, MultirotorConfig
# Sensors and dynamics setup
from pegasus.simulator.logic.dynamics import LinearDrag
from pegasus.simulator.logic.thrusters import QuadraticThrustCurve
from pegasus.simulator.logic.sensors import Barometer, IMU, Magnetometer, GPS
# Mavlink interface
from pegasus.simulator.logic.backends.mavlink_backend import MavlinkBackend
# Get the location of the IRIS asset
from pegasus.simulator.params import ROBOTS
class IrisConfig(MultirotorConfig):
def __init__(self):
# Stage prefix of the vehicle when spawning in the world
self.stage_prefix = "quadrotor"
# The USD file that describes the visual aspect of the vehicle (and some properties such as mass and moments of inertia)
self.usd_file = ROBOTS["Iris"]
# The default thrust curve for a quadrotor and dynamics relating to drag
self.thrust_curve = QuadraticThrustCurve()
self.drag = LinearDrag([0.50, 0.30, 0.0])
# The default sensors for a quadrotor
self.sensors = [Barometer(), IMU(), Magnetometer(), GPS()]
# The backends for actually sending commands to the vehicle. By default use mavlink (with default mavlink configurations)
        # [Can be None as well, if we do not desire to use PX4 with this simulated vehicle]. It can also be a ROS2 backend
# or your own custom Backend implementation!
self.backends = [MavlinkBackend()]
class Iris(Multirotor):
    def __init__(self, id: int, world, init_pos=[0.0, 0.0, 0.07], init_orientation=[0.0, 0.0, 0.0, 1.0], config=IrisConfig()):
        # Note: the Multirotor base class does not take a world argument, so it is not forwarded here
        super().__init__(config.stage_prefix, config.usd_file, id, init_pos, init_orientation, config=config) | 1,850 | Python | 42.046511 | 129 | 0.721081 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/dynamics/__init__.py | """
| Author: Marcelo Jacinto ([email protected])
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .drag import Drag
from .linear_drag import LinearDrag
| 214 | Python | 25.874997 | 82 | 0.747664 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/dynamics/drag.py | """
| File: drag.py
| Author: Marcelo Jacinto ([email protected])
| Description: Base interface used to implement forces that should actuate on a rigidbody such as linear drag
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from pegasus.simulator.logic.state import State
class Drag:
"""
Class that serves as a template for the implementation of Drag forces that actuate on a rigid body
"""
def __init__(self):
"""
        Initializes the Drag base class. Concrete implementations receive the drag coefficients of the vehicle as a 3x1 vector of constants.
"""
@property
def drag(self):
"""The drag force to be applied on the body frame of the vehicle
Returns:
list: A list with len==3 containing the drag force to be applied on the rigid body according to a FLU body reference
frame, expressed in Newton (N) [dx, dy, dz]
"""
return [0.0, 0.0, 0.0]
def update(self, state: State, dt: float):
"""Method that should be implemented to update the drag force to be applied on the body frame of the vehicle
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
list: A list with len==3 containing the drag force to be applied on the rigid body according to a FLU body reference
"""
return [0.0, 0.0, 0.0]
| 1,481 | Python | 36.049999 | 128 | 0.649561 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/dynamics/linear_drag.py | """
| File: linear_drag.py
| Author: Marcelo Jacinto ([email protected])
| Description: Computes the forces that should actuate on a rigidbody affected by linear drag
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
import numpy as np
from pegasus.simulator.logic.dynamics.drag import Drag
from pegasus.simulator.logic.state import State
class LinearDrag(Drag):
"""
    Class that implements linear drag computations affecting a rigid body. It inherits the Drag base class.
"""
def __init__(self, drag_coefficients=[0.0, 0.0, 0.0]):
"""
Receives as input the drag coefficients of the vehicle as a 3x1 vector of constants
Args:
            drag_coefficients (list[float]): The constant linear drag coefficients used to compute the total drag forces
affecting the rigid body. The linear drag is given by diag(dx, dy, dz) * [v_x, v_y, v_z] where the velocities
            are expressed in the body frame of the rigid body (using the FLU frame convention).
"""
# Initialize the base Drag class
super().__init__()
# The linear drag coefficients of the vehicle's body frame
self._drag_coefficients = np.diag(drag_coefficients)
# The drag force to apply on the vehicle's body frame
self._drag_force = np.array([0.0, 0.0, 0.0])
@property
def drag(self):
"""The drag force to be applied on the body frame of the vehicle
Returns:
list: A list with len==3 containing the drag force to be applied on the rigid body according to a FLU body reference
frame, expressed in Newton (N) [dx, dy, dz]
"""
return self._drag_force
def update(self, state: State, dt: float):
"""Method that updates the drag force to be applied on the body frame of the vehicle. The total drag force
applied on the body reference frame (FLU convention) is given by diag(dx,dy,dz) * R' * v
where v is the velocity of the vehicle expressed in the inertial frame and R' * v = velocity_body_frame
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
list: A list with len==3 containing the drag force to be applied on the rigid body according to a FLU body reference
"""
# Get the velocity of the vehicle expressed in the body frame of reference
body_vel = state.linear_body_velocity
# Compute the component of the drag force to be applied in the body frame
self._drag_force = -np.dot(self._drag_coefficients, body_vel)
return self._drag_force
| 2,762 | Python | 42.171874 | 128 | 0.663287 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/tests/__init__.py | # Copyright (c) 2023, Marcelo Jacinto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .test_hello_world import *
| 135 | Python | 21.666663 | 39 | 0.740741 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the extension's python module we are testing with an absolute import path, as if we were an external user (another extension)
import pegasus.simulator
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test; notice it is an "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = pegasus.simulator.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,669 | Python | 35.304347 | 142 | 0.683044 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Marcelo Jacinto"]
# The title and description fields are primarily for displaying extension info in UI
title = "Pegasus Simulator"
description="Extension providing the main framework interfaces for simulating aerial vehicles using PX4, Python or ROS 2 as a backend"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Simulation"
# Keywords for the extension
keywords = ["drone", "quadrotor", "multirotor", "UAV", "px4", "sitl", "robotics"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.ui" = {}
"omni.usd" = {}
"omni.kit.uiapp" = {}
"omni.isaac.core" = {}
"omni.ui.scene" = {}
"omni.kit.window.viewport" = {}
# Main python module this extension provides, it will be publicly available as "import pegasus.simulator".
[[python.module]]
name = "pegasus.simulator"
[python.pipapi]
requirements = ["numpy", "scipy", "pymavlink", "pyyaml"]
use_online_index = true
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,885 | TOML | 32.087719 | 134 | 0.731565 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/config/configs.yaml | global_coordinates:
altitude: 90.0
latitude: 38.736832
longitude: -9.137977
px4_dir: ~/PX4-Autopilot
| 107 | YAML | 16.999997 | 24 | 0.728972 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2023-02-17
- Initial version of Pegasus Simulator extension
### Added
- A widget GUI to spawn a limited set of simulation environments and drones using the PX4-bakend.
- A powerful API for sensors, drag, thrusters, control and vehicles.
- Barometer, IMU, magnetometer and GPS sensors.
- Linear drag model.
- Quadratic thrust curve model.
- Multirotor model.
- The 3DR Iris quadrotor simulation model.
- MAVLink communications control support.
- ROS 2 communications control support (needs fixing).
- A library for implementing rotations from NED to ENU and FLU to FRD frame conventions.
- Examples on how to use the framework in standalone scripting mode.
- Demo with a nonlinear controller implemented in python.
- A PX4 tool for automatically launching PX4 in SITL mode when provided with the PX4-Autopilot installation directory.
- A paper describing the motivation for this framework and its inner-workings.
- Basic documentation generation using sphinx. | 1,063 | Markdown | 43.333332 | 118 | 0.780809 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/docs/README.md | # Pegasus Simulator
Pegasus Simulator is a framework built on top of NVIDIA Omniverse and Isaac Sim. It is designed to provide an easy, yet powerful way of simulating the dynamics of vehicles. It provides a simulation interface for PX4 integration as well as a custom Python control interface. At the moment, only multirotor vehicles are supported, with support for other vehicle topologies planned for future versions.
## Contributing
The developers of the Pegasus Simulator welcome any positive contributions and ideas from the robotics community that help this extension mature. If you think you have a nice contribution to make, or just a simple suggestion, feel free to create bug reports, feature requests or open pull requests for direct code contributions.
## Acknowledgement
NVIDIA Isaac Sim is available freely under https://www.nvidia.com/en-us/omniverse/download/.
Pegasus Simulator is released under BSD-3 License.
The license files of its dependencies and assets are present in the docs/licenses directory.
## Citation
Please cite if you use this extension in your work:
```
@misc{jacinto2023pegasus,
author = {Marcelo Jacinto and Rita Cunha},
title = {Pegasus Simulator: An Isaac Sim Framework for Multiple Aerial Vehicles Simulation},
year = {2023},
eprint = {},
}
```
## Main Developer Team
This simulation framework is an open-source effort, started by me, Marcelo Jacinto, in January/2023. It is a tool that was created with the original purpose of serving my Ph.D. work plan for the next 4 years, which means that you can expect this repository to be maintained, hopefully at least until 2027.
* Project Founder
* Marcelo Jacinto, under the supervision of Prof. Rita Cunha and Prof. António Pascoal (IST/ISR-Lisbon)
* Architecture
* Marcelo Jacinto
* João Pinto
* Multirotor Dynamic Simulation and Control
* Marcelo Jacinto
* Example Applications
* Marcelo Jacinto
* João Pinto
* Paper Writing and Revision
* Marcelo Jacinto
* João Pinto
* Rita Cunha
* António Pascoal | 2,036 | Markdown | 39.739999 | 324 | 0.783399 |
superboySB/SBDrone_deprecated/scripts/README.md | # Tutorials for MAVROS and PX4
How to use MAVROS to publish topic commands that control a PX4 drone running in AirSim.
## Installing MAVROS from source
Building from source works the same way as in the single-drone tutorial: compile inside the "build" container first, then start the "runtime" container as follows:
```sh
docker run -itd --privileged --env=LOCAL_USER_ID="$(id -u)" --env=PX4_SIM_HOST_ADDR=172.16.13.104 -v /home/wangchao/daizipeng/SBDrone:/src:rw -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=:0 --network=host --name=mypx4-0 mypx4_image:v1 /bin/bash
```
Here, `--env=PX4_SIM_HOST_ADDR=172.16.13.104` adds the `PX4_SIM_HOST_ADDR` environment variable to the container and points it at the remote AirSim host; the value after `--name` sets the name of this container.
## Starting the MAVROS services step by step
On the Windows machine, first check AirSim's `settings.json`, launch one of the AirSim maps, and leave it waiting for connections. Then log into the container:
```sh
docker exec -it --user $(id -u) mypx4-0 /bin/bash
```
Open a terminal and run two PX4 instances; the setup only counts as successful once AirSim shows the QGC (GPS lock) related messages:
```sh
bash /src/Scripts/run_airsim_sitl.sh 0
bash /src/Scripts/run_airsim_sitl.sh 1
```
Note that every time you use ROS-related commands you first need to source the environment:
```sh
source /opt/ros/melodic/setup.bash
```
Open another terminal and start the MAVROS services. The first port is the receiving port on the local host (127.0.0.1) (`udp_onboard_payload_port_remote`), and the second port is the sending port on the flight controller (`udp_onboard_payload_port_local`). These can be matched against the onboard MAVLink UDP ports printed in the logs of the previous terminal.
```sh
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14280
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14281
```
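To verify that each MAVROS instance has actually connected to its SITL endpoint, you can echo the state topic (shown here for the default namespace; adjust it if you remapped the namespace) and check the `connected` field:
```sh
rostopic echo /mavros/state
```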
## Manually controlling the PX4 drone in AirSim with MAVROS topics (somewhat limited by version v1.12.1)
Following this [tutorial](https://www.youtube.com/watch?v=ZonkdMcwXH4), open a terminal and send MAVROS service calls to PX4 to control the drone. The commands below can be tried one after another:
```sh
# Request takeoff; the vehicle cannot take off yet because it is not armed
rosservice call /mavros/cmd/takeoff "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
# Arm the drone; it is now allowed to take off
rosservice call /mavros/cmd/arming "value: true"
# Take off
rosservice call /mavros/cmd/takeoff "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
# Land
rosservice call /mavros/cmd/land "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
```
You can also publish MAVROS topics to PX4. The following runs a position controller from one terminal:
```sh
# Publish a position-controller setpoint
rostopic pub /mavros/setpoint_position/local geometry_msgs/PoseStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
pose:
position:
x: 1.0
y: 0.0
z: 2.0
orientation:
x: 0.0
y: 0.0
z: 0.0
w: 0.0" -r 20
```
Then, in another terminal, set the flight mode:
```sh
# This service asks the flight controller (e.g. PX4) to switch to a specific flight mode. 'OFFBOARD' mode
# lets the flight controller accept motion commands coming from an external computer.
rosservice call /mavros/set_mode "base_mode: 0
custom_mode: 'OFFBOARD'"
# Arm the drone so the setpoints are executed
rosservice call /mavros/cmd/arming "value: true"
# You can keep publishing other position-controller setpoints
```
The following is a velocity-controller demo that flies the drone in a circle:
```sh
rostopic pub /mavros/setpoint_velocity/cmd_vel geometry_msgs/TwistStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
twist:
linear:
x: 1.0
y: 0.0
z: 0.0
angular:
x: 0.0
y: 0.0
z: 1.0" -r 20
```
| 2,690 | Markdown | 26.742268 | 247 | 0.693309 |
superboySB/SBDrone_deprecated/tests/test_api_control.py | # ready to run example: PythonClient/multirotor/hello_drone.py
import airsim
import os
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
# Async methods returns Future. Call join() to wait for task to complete.
client.takeoffAsync()
state = client.getMultirotorState(vehicle_name = 'UAV_0')
client.landAsync().join()
# take images
# responses = client.simGetImages([
# airsim.ImageRequest("0", airsim.ImageType.DepthVis),
# airsim.ImageRequest("1", airsim.ImageType.DepthPlanar, True)])
# print('Retrieved images: %d', len(responses))
# # do something with the images
# for response in responses:
# if response.pixels_as_float:
# print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
# airsim.write_pfm('./py1.pfm', airsim.get_pfm_array(response))
# else:
# print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
# airsim.write_file('./py1.png', response.image_data_uint8) | 1,100 | Python | 32.363635 | 91 | 0.708182 |
superboySB/SBDrone_deprecated/tests/test_manual_control.py | """
For connecting to the AirSim drone environment and testing API functionality
"""
import airsim
import os
import tempfile
import pprint
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True,vehicle_name='UAV_1')
state = client.getMultirotorState(vehicle_name='UAV_1')
s = pprint.pformat(state)
print("state: %s" % s)
client.takeoffAsync(timeout_sec = 20, vehicle_name = 'UAV_1')
# client.moveByManualAsync(vx_max = 1E6, vy_max = 1E6, z_min = -1E6, duration = 1, vehicle_name='UAV_1') # stick (RC) control inputs
# airsim.wait_key('Manual mode is setup. Press any key to send RC data to takeoff')
# The RC command keeps being applied until it is overridden by the next command
client.moveByRC(rcdata = airsim.RCData(pitch = 1, throttle = 0.5, is_initialized = True, is_valid = True), vehicle_name='UAV_1')
client.moveByRC(rcdata = airsim.RCData(pitch = 0, throttle = 0.1, is_initialized = True, is_valid = True), vehicle_name='UAV_1')
| 975 | Python | 32.655171 | 128 | 0.732308 |
superboySB/SBDrone_deprecated/tests/test_get_state.py | import airsim
import time
# this script moves the drone to a location, then rests it thousands of time
# purpose of this script is to stress test reset API
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104",port=41451)
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
for idx in range(3000):
# client.moveToPositionAsync(0, 0, -10, 5).join()
# client.reset()
# client.enableApiControl(True)
print(client.getMultirotorState())
print("%d" % idx)
time.sleep(1)
# that's enough fun for now. let's quit cleanly
client.enableApiControl(False)
| 635 | Python | 24.439999 | 76 | 0.733858 |
superboySB/SBDrone_deprecated/tests/test_functions.py | import time
import airsim
import numpy as np
def convert_pos_UE_to_AS(origin_UE : np.array, pos_UE : np.array):
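    # Convert a position from Unreal Engine world coordinates (centimeters) to AirSim
    # coordinates (meters): translate by the UE origin, flip the Z axis and scale cm -> m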
pos = np.zeros(3, dtype=float)
pos[0] = pos_UE[0] - origin_UE[0]
pos[1] = pos_UE[1] - origin_UE[1]
pos[2] = - pos_UE[2] + origin_UE[2]
return pos / 100
droneName = "Drone0"
origin_UE = np.array([0.0, 0.0, 910.0])
areans_train_long = np.array([
# Using larger environment
# [Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([41156.0, 20459.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([56206.0, 21019.0, 1000.0]))]
# Using smaller environment
[convert_pos_UE_to_AS(origin_UE, np.array([8430.0, -6760.0, 1000.0])),
convert_pos_UE_to_AS(origin_UE, np.array([14060.0, -6760.0, 1000.0]))]
])
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.reset()
client.enableApiControl(True, vehicle_name = droneName)
client.armDisarm(True, vehicle_name = droneName)
client.takeoffAsync(vehicle_name=droneName)
time.sleep(10)
client.client.call_async("resetVehicle", droneName,
airsim.Pose(airsim.Vector3r(areans_train_long[0][0][0],
areans_train_long[0][0][1],
areans_train_long[0][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)))
| 1,450 | Python | 35.274999 | 170 | 0.577931 |
superboySB/SBDrone_deprecated/tests/test_subprocress.py | import subprocess
# Define the shell command to run
command = "ls -l"  # "ls -l" is just an example; replace it with any other command you want to run
# Run the command
try:
    # Use subprocess.run() to execute the command
    # capture_output=True captures both stdout and stderr
    result = subprocess.run(command, shell=True, text=True, capture_output=True)
    # Print the result of the command
    print("stdout:")
    print(result.stdout)
    print("\nstderr:")
    print(result.stderr)
    print("\nreturn code:", result.returncode)
except subprocess.CalledProcessError as e:
    print("Command failed:", e) | 464 | Python | 20.136363 | 80 | 0.68319 |
AshisGhosh/roboai/docker-compose.yml | services:
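  # Isaac Sim container: mounts the Omniverse/Isaac caches and the roboai sources, and needs host networking, X11 access and an NVIDIA GPU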
isaac-sim:
build:
context: .
dockerfile: ./isaac_sim/Dockerfile
volumes:
- /tmp/.X11-unix:/tmp/.X11-unix
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw
- ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw
- ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw
- ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw
- ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw
- ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
- ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw
- ~/docker/isaac-sim/documents:/root/Documents:rw
- ./isaac_sim/isaac_sim:/isaac-sim/roboai/
- ./shared:/isaac-sim/roboai/shared
- ./isaac_sim/humble_ws/src:/isaac-sim/humble_ws/src
- ./isaac_sim/bin:/isaac-sim/roboai/bin
environment:
- DISPLAY=${DISPLAY}
- XAUTHORITY=/root/.Xauthority
- ACCEPT_EULA=Y
- PRIVACY_CONSENT=Y
- ROS_DOMAIN_ID=${ROS_DOMAIN_ID:-0}
entrypoint: /bin/bash -c "while true; do sleep 30; done"
network_mode: host
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
franka:
image: franka_isaac_moveit_tutorial
build:
context: ./franka_moveit
dockerfile: Dockerfile
stdin_open: true
tty: true
network_mode: host
ipc: host
privileged: true
environment:
- ROS_DOMAIN_ID=${ROS_DOMAIN_ID:-0}
- DISPLAY=${DISPLAY}
- QT_X11_NO_MITSHM=1
volumes:
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
- ./franka_moveit/config:/root/ws_moveit/src/moveit2_tutorials/doc/how_to_guides/isaac_panda/config
command: ros2 launch moveit2_tutorials isaac_demo.launch.py
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
grasp-server:
build:
context: ./grasping/grasp_server
dockerfile: Dockerfile
volumes:
- ./grasping/grasp_server:/app
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ./shared:/app/shared
environment:
- DISPLAY=${DISPLAY}
- XAUTHORITY=/root/.Xauthority
command: poetry run uvicorn app.main:app --host 0.0.0.0 --port 8005 --reload
network_mode: host
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
ollama-server:
image: ollama/ollama:latest
volumes:
- ~/.cache/ollama:/root/.ollama
ports:
- 11434:11434
roboai:
build:
context: .
dockerfile: ./roboai/Dockerfile
volumes:
- ./roboai:/app
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ./shared:/app/shared
- ~/.burr:/root/.burr
environment:
- DISPLAY=${DISPLAY}
- MUJOCO_GL=osmesa
- XAUTHORITY=/root/.Xauthority
# command: python -u -m roboai.roboai
command: bash -c "python -m streamlit run roboai/streamlit_app.py --server.headless true --server.port=8501 --server.address=0.0.0.0 & burr --no-open"
# command: /bin/bash -c "while true; do sleep 30; done"
network_mode: host
roboai-demo:
extends: roboai
command: python -u -m roboai.roboai_demo
| 3,455 | YAML | 30.706422 | 154 | 0.597685 |
AshisGhosh/roboai/README.md | # RoboAI: Playground + Framework for applying LLM/VLMs to Robots in Sim
### Update Videos:
* **May 27 2024** - [VIDEO](https://www.youtube.com/watch?v=ycvPWq4JfEI) - Robot learning task-relevant information and factoring it into the plan -- integrated with [OmniGibson](https://behavior.stanford.edu/omnigibson/) from Stanford/NVIDIA
* **May 8 2024** - [VIDEO](https://www.youtube.com/watch?v=sg3PTz5q6kc) - Robot going from plain text to grasping attempt -- integrated with ROS2, MoveIt2, a grasping model and Isaac Sim.
## Simulation Frameworks
### MuJoCo & Robosuite
[Mujoco](https://mujoco.org/) is Google DeepMind's physics simulation.
[Robosuite](https://robosuite.ai/) is a modular framework built on top of MuJoCo.
In the `/robosim` folder you'll find a Robosuite/MuJoCo sim environment:
* Focused on Panda arm grasping objects in pick and place environment
* Camera views to focus on objects
* Markers to indicate robot goal and grasp targets
* Simple API to control the robot (a minimal usage sketch is shown below)
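A rough sketch of what driving such a Robosuite environment can look like; the environment name, options and random actions here are illustrative assumptions rather than the exact `/robosim` API:
```python
import numpy as np
import robosuite as suite
# Create a Panda pick-and-place environment with offscreen camera observations
env = suite.make(
    "PickPlace",
    robots="Panda",
    has_renderer=False,
    has_offscreen_renderer=True,
    use_camera_obs=True,
    camera_names="agentview",
)
obs = env.reset()
low, high = env.action_spec
for _ in range(10):
    # Replace the random action with the output of a planner or policy
    action = np.random.uniform(low, high)
    obs, reward, done, info = env.step(action)
env.close()
```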
### Isaac Sim
[Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html) is NVIDIA's robot simulation powered by GPUs.
Isaac Sim offers advanced tooling as well as close to real rendering. This was adopted to better test vision models.
Isaac Sim does not support external async frameworks as well - the development towards it in this project is still in progress and may need some re-architecting.
The simulation
* Focuses on the Panda arm on a table with objects to grasp
* Cameras for different views
* Initial work on Markers - rendering/material support is still WIP
## Models & LLM Framework
The high-level goal is to be able to command a robot to complete a long-horizon task with natural language.
An example would be to "clear the messy table".
### LLMs
LLMs are used in the planning layer. Once the scene is understood, an LLM is used (either iteratively or with CoT/ToT) to generate a robot-affordable plan.
Currently focused on free models hosted on [openrouter.ai](https://openrouter.ai).
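Since openrouter.ai exposes an OpenAI-compatible endpoint, a planning call can be sketched roughly as follows; the model id, prompts and key handling here are placeholders, not the project's actual code:
```python
from openai import OpenAI
# OpenRouter speaks the OpenAI API; the key is assumed to come from your own configuration
client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key="YOUR_OPENROUTER_API_KEY")
response = client.chat.completions.create(
    model="some-free-model-id",  # placeholder model id
    messages=[
        {"role": "system", "content": "You are a robot task planner."},
        {"role": "user", "content": "Plan the steps to clear the messy table."},
    ],
)
print(response.choices[0].message.content)
```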
### VLMs
VLMs are an extremely fast changing space. Current work is focused on:
* [moondream2](https://huggingface.co/vikhyatk/moondream2)
* [VILA-2.7b](https://huggingface.co/Efficient-Large-Model/VILA-2.7b) -- inference running on a Jetson Orin Nano (not in this repo) using [NanoLLM](https://dusty-nv.github.io/NanoLLM/index.html) | 2,351 | Markdown | 43.377358 | 242 | 0.762654 |
AshisGhosh/roboai/model_server/pyproject.toml | [tool.poetry]
name = "model-server"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.1"
uvicorn = "^0.29.0"
transformers = "^4.39.3"
timm = "^0.9.16"
einops = "^0.7.0"
python-multipart = "^0.0.9"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 399 | TOML | 18.047618 | 46 | 0.651629 |
AshisGhosh/roboai/model_server/model_server/hf_cerule.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceCerule:
def __init__(self):
self.model_id = "Tensoic/Cerule-v0.1"
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, trust_remote_code=True
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
def encode_image(self, image):
start_encode = time.time()
encoded_image = self.model.encode_image(image)
log.info(f"Image encoded in {time.time() - start_encode} seconds.")
return encoded_image
def answer_question(self, enc_image, question):
start_model = time.time()
answer = self.model.answer_question(enc_image, question, self.tokenizer)
log.info(f"Answered question in {time.time() - start_model} seconds.")
return answer
def answer_question_from_image(self, image, question):
enc_image = self.encode_image(image)
return self.answer_question(enc_image, question)
if __name__ == "__main__":
model = HuggingFaceCerule()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
enc_image = model.encode_image(image)
question = "Describe this image."
print(model.answer_question(enc_image, question))
| 1,609 | Python | 31.857142 | 80 | 0.674953 |
AshisGhosh/roboai/model_server/model_server/hf_idefics.py | # Load model directly
from transformers import AutoProcessor, AutoModelForVision2Seq
from transformers.image_utils import load_image
import torch
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
class HuggingFaceIdefics:
def __init__(self):
model_load_start = time.time()
self.processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
self.model = AutoModelForVision2Seq.from_pretrained(
"HuggingFaceM4/idefics2-8b"
).to(DEVICE)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
def answer_question_from_image(self, image, question):
        # Build the chat prompt from the question that was passed in and use the provided image directly
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": question},
                ],
            },
        ]
        prompt = self.processor.apply_chat_template(
            messages, add_generation_prompt=True
        )
        inputs = self.processor(text=prompt, images=[image], return_tensors="pt")
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
start_time = time.time()
generated_ids = self.model.generate(**inputs, max_new_tokens=500)
log.info(f"Generated in {time.time() - start_time} seconds.")
start_time = time.time()
generated_texts = self.processor.batch_decode(
generated_ids, skip_special_tokens=True
)
log.info(f"Decoded in {time.time() - start_time} seconds.")
return generated_texts
if __name__ == "__main__":
log.info("Loading model...")
model = HuggingFaceIdefics()
log.info("Model loaded.")
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
question = "Describe this image."
log.info("Answering question...")
log.info(model.answer_question_from_image(image, question))
| 2,195 | Python | 31.294117 | 83 | 0.617768 |
AshisGhosh/roboai/model_server/model_server/hf_moondream2.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceMoonDream2:
def __init__(self):
self.model_id = "vikhyatk/moondream2"
self.revision = "2024-04-02"
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, trust_remote_code=True, revision=self.revision
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_id, revision=self.revision
)
def encode_image(self, image):
start_encode = time.time()
encoded_image = self.model.encode_image(image)
log.info(f"Image encoded in {time.time() - start_encode} seconds.")
return encoded_image
def answer_question(self, enc_image, question):
start_model = time.time()
answer = self.model.answer_question(enc_image, question, self.tokenizer)
log.info(f"Answered question in {time.time() - start_model} seconds.")
return answer
def answer_question_from_image(self, image, question):
enc_image = self.encode_image(image)
return self.answer_question(enc_image, question)
if __name__ == "__main__":
model = HuggingFaceMoonDream2()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
enc_image = model.encode_image(image)
question = "Describe this image."
log.info(model.answer_question(enc_image, question))
| 1,727 | Python | 32.230769 | 80 | 0.670527 |
AshisGhosh/roboai/model_server/model_server/hf_nanollava.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import torch
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceNanoLLaVA:
def __init__(self):
torch.set_default_device("cpu")
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
"qnguyen3/nanoLLaVA",
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True,
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(
"qnguyen3/nanoLLaVA", trust_remote_code=True
)
def process_image(self, image):
start_process = time.time()
        image_tensor = self.model.process_images([image], self.model.config).to(
            dtype=self.model.dtype
        )
log.info(f"Image processed in {time.time() - start_process} seconds.")
return image_tensor
def answer_question(self, image_tensor, prompt):
messages = [{"role": "user", "content": f"<image>\n{prompt}"}]
text = self.tokenizer.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
text_chunks = [
self.tokenizer(chunk).input_ids for chunk in text.split("<image>")
]
input_ids = torch.tensor(
text_chunks[0] + [-200] + text_chunks[1], dtype=torch.long
).unsqueeze(0)
start_model = time.time()
        output_ids = self.model.generate(
            input_ids, images=image_tensor, max_new_tokens=2048, use_cache=True
        )[0]
log.info(f"Answered question in {time.time() - start_model} seconds.")
output = self.tokenizer.decode(
output_ids[input_ids.shape[1] :], skip_special_tokens=True
).strip()
return output
if __name__ == "__main__":
model = HuggingFaceNanoLLaVA()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
    image_tensor = model.process_image(image)
question = "Describe this image."
print(model.answer_question(image_tensor, question))
| 2,292 | Python | 32.231884 | 79 | 0.623473 |