Dataset columns (one row per source file):

  file_path          : string, length 21-207
  content            : string, length 5-1.02M
  size               : int64, 5-1.02M
  lang               : string, 9 classes
  avg_line_length    : float64, 1.33-100
  max_line_length    : int64, 4-993
  alphanum_fraction  : float64, 0.27-0.93
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/config/extension.toml
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.15.10"

# Description
title = "ORBIT framework for Robot Learning"
description = "Extension providing main framework interfaces and abstractions for robot learning."
readme = "docs/README.md"
repository = "https://github.com/NVIDIA-Omniverse/Orbit"
category = "robotics"
keywords = ["kit", "robotics", "learning", "ai"]

[dependencies]
"omni.isaac.core" = {}
"omni.replicator.core" = {}

[[python.module]]
name = "omni.isaac.orbit"
size: 512 | lang: TOML | avg_line_length: 24.649999 | max_line_length: 96 | alphanum_fraction: 0.722656
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Package containing the core framework."""

import os
import toml

# Conveniences to other module directories via relative paths
ORBIT_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""

ORBIT_METADATA = toml.load(os.path.join(ORBIT_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""

# Configure the module-level variables
__version__ = ORBIT_METADATA["package"]["version"]
size: 635 | lang: Python | avg_line_length: 30.799998 | max_line_length: 85 | alphanum_fraction: 0.727559
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/device_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base class for teleoperation interface."""

from __future__ import annotations

from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Any


class DeviceBase(ABC):
    """An interface class for teleoperation devices."""

    def __init__(self):
        """Initialize the teleoperation interface."""
        pass

    def __str__(self) -> str:
        """Returns: A string containing the information of joystick."""
        return f"{self.__class__.__name__}"

    """
    Operations
    """

    @abstractmethod
    def reset(self):
        """Reset the internals."""
        raise NotImplementedError

    @abstractmethod
    def add_callback(self, key: Any, func: Callable):
        """Add additional functions to bind to the device.

        Args:
            key: The button to check against.
            func: The function to call when key is pressed. The callback function should not
                take any arguments.
        """
        raise NotImplementedError

    @abstractmethod
    def advance(self) -> Any:
        """Provides the joystick event state.

        Returns:
            The processed output from the joystick.
        """
        raise NotImplementedError
size: 1,343 | lang: Python | avg_line_length: 23.888888 | max_line_length: 92 | alphanum_fraction: 0.627699
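To make the abstract interface above concrete, here is a minimal sketch of a hypothetical subclass. The DummyDevice name and its constant command are invented for illustration, and running it assumes an environment where the omni.isaac.orbit extension is importable.

from collections.abc import Callable
from typing import Any

import numpy as np

from omni.isaac.orbit.devices import DeviceBase


class DummyDevice(DeviceBase):
    """A hypothetical device that always commands a constant forward velocity."""

    def __init__(self):
        super().__init__()
        self._callbacks: dict[Any, Callable] = {}

    def reset(self):
        # nothing to clear for a constant command
        pass

    def add_callback(self, key: Any, func: Callable):
        # store the callback so an input loop could trigger it later
        self._callbacks[key] = func

    def advance(self) -> np.ndarray:
        # return a fixed (v_x, v_y, omega_z) command
        return np.array([0.5, 0.0, 0.0])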
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package providing interfaces to different teleoperation devices.

Currently, the following categories of devices are supported:

* **Keyboard**: Standard keyboard with WASD and arrow keys.
* **Spacemouse**: 3D mouse with 6 degrees of freedom.
* **Gamepad**: Gamepad with two 2D joysticks and buttons. Example: Xbox controller.

All device interfaces inherit from the :class:`DeviceBase` class, which provides a common interface
for all devices. The device interface reads the input data when the :meth:`DeviceBase.advance` method
is called. It also provides the function :meth:`DeviceBase.add_callback` to add user-defined callback
functions to be called when a particular input is pressed from the peripheral device.
"""

from .device_base import DeviceBase
from .gamepad import Se2Gamepad, Se3Gamepad
from .keyboard import Se2Keyboard, Se3Keyboard
from .spacemouse import Se2SpaceMouse, Se3SpaceMouse
size: 1,033 | lang: Python | avg_line_length: 40.359998 | max_line_length: 110 | alphanum_fraction: 0.791868
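As a usage sketch of the workflow described in the docstring above: create a device, register a callback, and poll advance() once per simulation step. The loop condition and the apply_command helper are hypothetical placeholders for user code, and an Isaac Sim app must already be running for the keyboard interface to exist.

from omni.isaac.orbit.devices import Se3Keyboard

# create the device interface and register a user callback on the "R" key
teleop = Se3Keyboard(pos_sensitivity=0.4, rot_sensitivity=0.8)
teleop.add_callback("R", lambda: print("R pressed"))
teleop.reset()

while simulation_is_running():  # hypothetical loop condition from user code
    # read the latest 6D delta pose and the binary gripper command
    delta_pose, close_gripper = teleop.advance()
    apply_command(delta_pose, close_gripper)  # hypothetical helper from user code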
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se3_spacemouse.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Spacemouse controller for SE(3) control.""" from __future__ import annotations import hid import numpy as np import threading import time from collections.abc import Callable from scipy.spatial.transform.rotation import Rotation from ..device_base import DeviceBase from .utils import convert_buffer class Se3SpaceMouse(DeviceBase): """A space-mouse controller for sending SE(3) commands as delta poses. This class implements a space-mouse controller to provide commands to a robotic arm with a gripper. It uses the `HID-API`_ which interfaces with USD and Bluetooth HID-class devices across multiple platforms [1]. The command comprises of two parts: * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians. * gripper: a binary command to open or close the gripper. Note: The interface finds and uses the first supported device connected to the computer. Currently tested for following devices: - SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/ .. _HID-API: https://github.com/libusb/hidapi """ def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8): """Initialize the space-mouse layer. Args: pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.4. rot_sensitivity: Magnitude of scale input rotation commands scaling. Defaults to 0.8. """ # store inputs self.pos_sensitivity = pos_sensitivity self.rot_sensitivity = rot_sensitivity # acquire device interface self._device = hid.device() self._find_device() # read rotations self._read_rotation = False # command buffers self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) # dictionary for additional callbacks self._additional_callbacks = dict() # run a thread for listening to device updates self._thread = threading.Thread(target=self._run_device) self._thread.daemon = True self._thread.start() def __del__(self): """Destructor for the class.""" self._thread.join() def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Spacemouse Controller for SE(3): {self.__class__.__name__}\n" msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n" msg += f"\tProduct: {self._device.get_product_string()}\n" msg += "\t----------------------------------------------\n" msg += "\tRight button: reset command\n" msg += "\tLeft button: toggle gripper command (open/close)\n" msg += "\tMove mouse laterally: move arm horizontally in x-y plane\n" msg += "\tMove mouse vertically: move arm vertically\n" msg += "\tTwist mouse about an axis: rotate arm about a corresponding axis" return msg """ Operations """ def reset(self): # default flags self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) def add_callback(self, key: str, func: Callable): # check keys supported by callback if key not in ["L", "R"]: raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.") # TODO: Improve this to allow multiple buttons on same key. self._additional_callbacks[key] = func def advance(self) -> tuple[np.ndarray, bool]: """Provides the result from spacemouse event state. Returns: A tuple containing the delta pose command and gripper commands. 
""" rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec() # if new command received, reset event flag to False until keyboard updated. return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper """ Internal helpers. """ def _find_device(self): """Find the device connected to computer.""" found = False # implement a timeout for device search for _ in range(5): for device in hid.enumerate(): if device["product_string"] == "SpaceMouse Compact": # set found flag found = True vendor_id = device["vendor_id"] product_id = device["product_id"] # connect to the device self._device.open(vendor_id, product_id) # check if device found if not found: time.sleep(1.0) else: break # no device found: return false if not found: raise OSError("No device found by SpaceMouse. Is the device connected?") def _run_device(self): """Listener thread that keeps pulling new messages.""" # keep running while True: # read the device data data = self._device.read(7) if data is not None: # readings from 6-DoF sensor if data[0] == 1: self._delta_pos[1] = self.pos_sensitivity * convert_buffer(data[1], data[2]) self._delta_pos[0] = self.pos_sensitivity * convert_buffer(data[3], data[4]) self._delta_pos[2] = self.pos_sensitivity * convert_buffer(data[5], data[6]) * -1.0 elif data[0] == 2 and not self._read_rotation: self._delta_rot[1] = self.rot_sensitivity * convert_buffer(data[1], data[2]) self._delta_rot[0] = self.rot_sensitivity * convert_buffer(data[3], data[4]) self._delta_rot[2] = self.rot_sensitivity * convert_buffer(data[5], data[6]) # readings from the side buttons elif data[0] == 3: # press left button if data[1] == 1: # close gripper self._close_gripper = not self._close_gripper # additional callbacks if "L" in self._additional_callbacks: self._additional_callbacks["L"] # right button is for reset if data[1] == 2: # reset layer self.reset() # additional callbacks if "R" in self._additional_callbacks: self._additional_callbacks["R"] if data[1] == 3: self._read_rotation = not self._read_rotation
size: 6,921 | lang: Python | avg_line_length: 38.781609 | max_line_length: 115 | alphanum_fraction: 0.569426
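The Se3SpaceMouse record above packs its command as a 6D vector (3D position delta followed by a rotation vector) plus a boolean gripper flag. The sketch below, plain NumPy/SciPy with no device required, reproduces that packing and shows how a consumer might unpack it; the example numbers are made up.

import numpy as np
from scipy.spatial.transform import Rotation

# the same packing that advance() performs: position delta + Euler angles -> rotation vector
delta_pos = np.array([0.01, 0.0, 0.02])         # meters
delta_rot = np.array([0.0, 0.05, 0.0])          # roll, pitch, yaw in radians
rot_vec = Rotation.from_euler("XYZ", delta_rot).as_rotvec()
command = np.concatenate([delta_pos, rot_vec])  # shape (6,)

# a consumer can split the command back into translational and rotational parts
pos_cmd, rot_cmd = command[:3], command[3:]
print(pos_cmd, Rotation.from_rotvec(rot_cmd).as_euler("XYZ"))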
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Spacemouse device for SE(2) and SE(3) control."""

from .se2_spacemouse import Se2SpaceMouse
from .se3_spacemouse import Se3SpaceMouse
size: 261 | lang: Python | avg_line_length: 25.199997 | max_line_length: 56 | alphanum_fraction: 0.758621
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Helper functions for SpaceMouse."""

# MIT License
#
# Copyright (c) 2022 Stanford Vision and Learning Lab and UT Robot Perception and Learning Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


def convert_buffer(b1, b2):
    """Converts raw SpaceMouse readings to commands.

    Args:
        b1: 8-bit byte
        b2: 8-bit byte

    Returns:
        Scaled value from Space-mouse message
    """
    return _scale_to_control(_to_int16(b1, b2))


"""
Private methods.
"""


def _to_int16(y1, y2):
    """Convert two 8 bit bytes to a signed 16 bit integer.

    Args:
        y1: 8-bit byte
        y2: 8-bit byte

    Returns:
        16-bit integer
    """
    x = (y1) | (y2 << 8)
    if x >= 32768:
        x = -(65536 - x)
    return x


def _scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
    """Normalize raw HID readings to target range.

    Args:
        x: Raw reading from HID
        axis_scale: (Inverted) scaling factor for mapping raw input value
        min_v: Minimum limit after scaling
        max_v: Maximum limit after scaling

    Returns:
        Clipped, scaled input from HID
    """
    x = x / axis_scale
    return min(max(x, min_v), max_v)
size: 2,326 | lang: Python | avg_line_length: 28.455696 | max_line_length: 94 | alphanum_fraction: 0.686586
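As a quick check of the helpers above, the following worked example (plain Python, no hardware required, with made-up byte values) converts a two-byte little-endian reading into a normalized axis value the same way convert_buffer does.

def to_int16(y1, y2):
    # little-endian pair of bytes -> signed 16-bit integer (mirrors _to_int16)
    x = y1 | (y2 << 8)
    return x - 65536 if x >= 32768 else x


def scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
    # divide by the axis scale and clip to [-1, 1] (mirrors _scale_to_control)
    return min(max(x / axis_scale, min_v), max_v)


# bytes (0x2C, 0x01) -> 300 -> 300 / 350 ≈ 0.857
print(scale_to_control(to_int16(0x2C, 0x01)))
# bytes (0xD4, 0xFE) -> -300 -> ≈ -0.857
print(scale_to_control(to_int16(0xD4, 0xFE)))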
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se2_spacemouse.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Spacemouse controller for SE(2) control.""" from __future__ import annotations import hid import numpy as np import threading import time from collections.abc import Callable from ..device_base import DeviceBase from .utils import convert_buffer class Se2SpaceMouse(DeviceBase): r"""A space-mouse controller for sending SE(2) commands as delta poses. This class implements a space-mouse controller to provide commands to mobile base. It uses the `HID-API`_ which interfaces with USD and Bluetooth HID-class devices across multiple platforms. The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`. Note: The interface finds and uses the first supported device connected to the computer. Currently tested for following devices: - SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/ .. _HID-API: https://github.com/libusb/hidapi """ def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0): """Initialize the spacemouse layer. Args: v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8. v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4. omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0. """ # store inputs self.v_x_sensitivity = v_x_sensitivity self.v_y_sensitivity = v_y_sensitivity self.omega_z_sensitivity = omega_z_sensitivity # acquire device interface self._device = hid.device() self._find_device() # command buffers self._base_command = np.zeros(3) # dictionary for additional callbacks self._additional_callbacks = dict() # run a thread for listening to device updates self._thread = threading.Thread(target=self._run_device) self._thread.daemon = True self._thread.start() def __del__(self): """Destructor for the class.""" self._thread.join() def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Spacemouse Controller for SE(2): {self.__class__.__name__}\n" msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n" msg += f"\tProduct: {self._device.get_product_string()}\n" msg += "\t----------------------------------------------\n" msg += "\tRight button: reset command\n" msg += "\tMove mouse laterally: move base horizontally in x-y plane\n" msg += "\tTwist mouse about z-axis: yaw base about a corresponding axis" return msg """ Operations """ def reset(self): # default flags self._base_command.fill(0.0) def add_callback(self, key: str, func: Callable): # check keys supported by callback if key not in ["L", "R"]: raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.") # TODO: Improve this to allow multiple buttons on same key. self._additional_callbacks[key] = func def advance(self) -> np.ndarray: """Provides the result from spacemouse event state. Returns: A 3D array containing the linear (x,y) and angular velocity (z). """ return self._base_command """ Internal helpers. 
""" def _find_device(self): """Find the device connected to computer.""" found = False # implement a timeout for device search for _ in range(5): for device in hid.enumerate(): if device["product_string"] == "SpaceMouse Compact": # set found flag found = True vendor_id = device["vendor_id"] product_id = device["product_id"] # connect to the device self._device.open(vendor_id, product_id) # check if device found if not found: time.sleep(1.0) else: break # no device found: return false if not found: raise OSError("No device found by SpaceMouse. Is the device connected?") def _run_device(self): """Listener thread that keeps pulling new messages.""" # keep running while True: # read the device data data = self._device.read(13) if data is not None: # readings from 6-DoF sensor if data[0] == 1: # along y-axis self._base_command[1] = self.v_y_sensitivity * convert_buffer(data[1], data[2]) # along x-axis self._base_command[0] = self.v_x_sensitivity * convert_buffer(data[3], data[4]) elif data[0] == 2: # along z-axis self._base_command[2] = self.omega_z_sensitivity * convert_buffer(data[3], data[4]) # readings from the side buttons elif data[0] == 3: # press left button if data[1] == 1: # additional callbacks if "L" in self._additional_callbacks: self._additional_callbacks["L"] # right button is for reset if data[1] == 2: # reset layer self.reset() # additional callbacks if "R" in self._additional_callbacks: self._additional_callbacks["R"]
size: 5,913 | lang: Python | avg_line_length: 36.66879 | max_line_length: 117 | alphanum_fraction: 0.566041
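Both spacemouse classes locate the device by enumerating HID devices and matching the product string. Below is a standalone sketch of that discovery pattern, assuming the hid package (hidapi bindings) is installed and a SpaceMouse Compact is plugged in.

import time

import hid

device = hid.device()
found = False
# retry a few times in case the device enumerates late
for _ in range(5):
    for info in hid.enumerate():
        if info["product_string"] == "SpaceMouse Compact":
            device.open(info["vendor_id"], info["product_id"])
            found = True
            break
    if found:
        break
    time.sleep(1.0)

if not found:
    raise OSError("No device found by SpaceMouse. Is the device connected?")
print("Connected to:", device.get_product_string())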
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se2_gamepad.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Gamepad controller for SE(2) control.""" from __future__ import annotations import numpy as np import weakref from collections.abc import Callable import carb import omni from ..device_base import DeviceBase class Se2Gamepad(DeviceBase): r"""A gamepad controller for sending SE(2) commands as velocity commands. This class is designed to provide a gamepad controller for mobile base (such as quadrupeds). It uses the Omniverse gamepad interface to listen to gamepad events and map them to robot's task-space commands. The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`. Key bindings: ====================== ========================= ======================== Command Key (+ve axis) Key (-ve axis) ====================== ========================= ======================== Move along x-axis left stick up left stick down Move along y-axis left stick right left stick left Rotate along z-axis right stick right right stick left ====================== ========================= ======================== .. seealso:: The official documentation for the gamepad interface: `Carb Gamepad Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__. """ def __init__( self, v_x_sensitivity: float = 1.0, v_y_sensitivity: float = 1.0, omega_z_sensitivity: float = 1.0, dead_zone: float = 0.01, ): """Initialize the gamepad layer. Args: v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 1.0. v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 1.0. omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0. dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than this value will be ignored. Defaults to 0.01. """ # turn off simulator gamepad control carb_settings_iface = carb.settings.get_settings() carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False) # store inputs self.v_x_sensitivity = v_x_sensitivity self.v_y_sensitivity = v_y_sensitivity self.omega_z_sensitivity = omega_z_sensitivity self.dead_zone = dead_zone # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._gamepad = self._appwindow.get_gamepad(0) # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called self._gamepad_sub = self._input.subscribe_to_gamepad_events( self._gamepad, lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args), ) # bindings for gamepad to command self._create_key_bindings() # command buffers # When using the gamepad, two values are provided for each axis. # For example: when the left stick is moved down, there are two evens: `left_stick_down = 0.8` # and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0, # which is not the desired behavior. Therefore, we save both the values into the buffer and use # the maximum value. 
# (positive, negative), (x, y, yaw) self._base_command_raw = np.zeros([2, 3]) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Unsubscribe from gamepad events.""" self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub) self._gamepad_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Gamepad Controller for SE(2): {self.__class__.__name__}\n" msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n" msg += "\t----------------------------------------------\n" msg += "\tMove in X-Y plane: left stick\n" msg += "\tRotate in Z-axis: right stick\n" return msg """ Operations """ def reset(self): # default flags self._base_command_raw.fill(0.0) def add_callback(self, key: carb.input.GamepadInput, func: Callable): """Add additional functions to bind gamepad. A list of available gamepad keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.GamepadInput>`__. Args: key: The gamepad button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> np.ndarray: """Provides the result from gamepad event state. Returns: A 3D array containing the linear (x,y) and angular velocity (z). """ return self._resolve_command_buffer(self._base_command_raw) """ Internal helpers. """ def _on_gamepad_event(self, event: carb.input.GamepadEvent, *args, **kwargs): """Subscriber callback to when kit is updated. Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput """ # check if the event is a button press cur_val = event.value if abs(cur_val) < self.dead_zone: cur_val = 0 # -- left and right stick if event.input in self._INPUT_STICK_VALUE_MAPPING: direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input] # change the value only if the stick is moved (soft press) self._base_command_raw[direction, axis] = value * cur_val # additional callbacks if event.input in self._additional_callbacks: self._additional_callbacks[event.input]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_STICK_VALUE_MAPPING = { # forward command carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.v_x_sensitivity), # backward command carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.v_x_sensitivity), # right command carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.v_y_sensitivity), # left command carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.v_y_sensitivity), # yaw command (positive) carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 2, self.omega_z_sensitivity), # yaw command (negative) carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 2, self.omega_z_sensitivity), } def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray: """Resolves the command buffer. Args: raw_command: The raw command from the gamepad. Shape is (2, 3) This is a 2D array since gamepad dpad/stick returns two values corresponding to the positive and negative direction. The first index is the direction (0: positive, 1: negative) and the second index is value (absolute) of the command. Returns: Resolved command. 
Shape is (3,) """ # compare the positive and negative value decide the sign of the value # if the positive value is larger, the sign is positive (i.e. False, 0) # if the negative value is larger, the sign is positive (i.e. True, 1) command_sign = raw_command[1, :] > raw_command[0, :] # extract the command value command = raw_command.max(axis=0) # apply the sign # if the sign is positive, the value is already positive. # if the sign is negative, the value is negative after applying the sign. command[command_sign] *= -1 return command
size: 8,563 | lang: Python | avg_line_length: 41.396039 | max_line_length: 192 | alphanum_fraction: 0.605045
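The gamepad classes keep a (positive, negative) pair of raw values per axis and resolve them into one signed command. The NumPy sketch below reproduces that _resolve_command_buffer logic on a made-up raw buffer so the sign convention is easy to see.

import numpy as np


def resolve_command_buffer(raw_command: np.ndarray) -> np.ndarray:
    # row 0 holds positive-direction magnitudes, row 1 negative-direction magnitudes
    command_sign = raw_command[1, :] > raw_command[0, :]  # True where the negative side dominates
    command = raw_command.max(axis=0)                     # take the stronger magnitude per axis
    command[command_sign] *= -1                           # flip the sign where the negative side dominates
    return command


# (positive, negative) x (v_x, v_y, omega_z): stick pushed forward and twisted left
raw = np.array([[0.8, 0.0, 0.0],
                [0.0, 0.0, 0.5]])
print(resolve_command_buffer(raw))  # -> [ 0.8  0.  -0.5]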
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se3_gamepad.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Gamepad controller for SE(3) control.""" import numpy as np import weakref from collections.abc import Callable from scipy.spatial.transform.rotation import Rotation import carb import omni from ..device_base import DeviceBase class Se3Gamepad(DeviceBase): """A gamepad controller for sending SE(3) commands as delta poses and binary command (open/close). This class is designed to provide a gamepad controller for a robotic arm with a gripper. It uses the gamepad interface to listen to gamepad events and map them to the robot's task-space commands. The command comprises of two parts: * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians. * gripper: a binary command to open or close the gripper. Stick and Button bindings: ============================ ========================= ========================= Description Stick/Button (+ve axis) Stick/Button (-ve axis) ============================ ========================= ========================= Toggle gripper(open/close) X Button X Button Move along x-axis Left Stick Up Left Stick Down Move along y-axis Left Stick Left Left Stick Right Move along z-axis Right Stick Up Right Stick Down Rotate along x-axis D-Pad Left D-Pad Right Rotate along y-axis D-Pad Down D-Pad Up Rotate along z-axis Right Stick Left Right Stick Right ============================ ========================= ========================= .. seealso:: The official documentation for the gamepad interface: `Carb Gamepad Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__. """ def __init__(self, pos_sensitivity: float = 1.0, rot_sensitivity: float = 1.6, dead_zone: float = 0.01): """Initialize the gamepad layer. Args: pos_sensitivity: Magnitude of input position command scaling. Defaults to 1.0. rot_sensitivity: Magnitude of scale input rotation commands scaling. Defaults to 1.6. dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than this value will be ignored. Defaults to 0.01. """ # turn off simulator gamepad control carb_settings_iface = carb.settings.get_settings() carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False) # store inputs self.pos_sensitivity = pos_sensitivity self.rot_sensitivity = rot_sensitivity self.dead_zone = dead_zone # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._gamepad = self._appwindow.get_gamepad(0) # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called self._gamepad_sub = self._input.subscribe_to_gamepad_events( self._gamepad, lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args), ) # bindings for gamepad to command self._create_key_bindings() # command buffers self._close_gripper = False # When using the gamepad, two values are provided for each axis. # For example: when the left stick is moved down, there are two evens: `left_stick_down = 0.8` # and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0, # which is not the desired behavior. Therefore, we save both the values into the buffer and use # the maximum value. 
# (positive, negative), (x, y, z, roll, pitch, yaw) self._delta_pose_raw = np.zeros([2, 6]) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Unsubscribe from gamepad events.""" self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub) self._gamepad_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Gamepad Controller for SE(3): {self.__class__.__name__}\n" msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n" msg += "\t----------------------------------------------\n" msg += "\tToggle gripper (open/close): X\n" msg += "\tMove arm along x-axis: Left Stick Up/Down\n" msg += "\tMove arm along y-axis: Left Stick Left/Right\n" msg += "\tMove arm along z-axis: Right Stick Up/Down\n" msg += "\tRotate arm along x-axis: D-Pad Right/Left\n" msg += "\tRotate arm along y-axis: D-Pad Down/Up\n" msg += "\tRotate arm along z-axis: Right Stick Left/Right\n" return msg """ Operations """ def reset(self): # default flags self._close_gripper = False self._delta_pose_raw.fill(0.0) def add_callback(self, key: carb.input.GamepadInput, func: Callable): """Add additional functions to bind gamepad. A list of available gamepad keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput>`__. Args: key: The gamepad button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> tuple[np.ndarray, bool]: """Provides the result from gamepad event state. Returns: A tuple containing the delta pose command and gripper commands. """ # -- resolve position command delta_pos = self._resolve_command_buffer(self._delta_pose_raw[:, :3]) # -- resolve rotation command delta_rot = self._resolve_command_buffer(self._delta_pose_raw[:, 3:]) # -- convert to rotation vector rot_vec = Rotation.from_euler("XYZ", delta_rot).as_rotvec() # return the command and gripper state return np.concatenate([delta_pos, rot_vec]), self._close_gripper """ Internal helpers. """ def _on_gamepad_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.Gamepad """ # check if the event is a button press cur_val = event.value if abs(cur_val) < self.dead_zone: cur_val = 0 # -- button if event.input == carb.input.GamepadInput.X: # toggle gripper based on the button pressed if cur_val > 0.5: self._close_gripper = not self._close_gripper # -- left and right stick if event.input in self._INPUT_STICK_VALUE_MAPPING: direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input] # change the value only if the stick is moved (soft press) self._delta_pose_raw[direction, axis] = value * cur_val # -- dpad (4 arrow buttons on the console) if event.input in self._INPUT_DPAD_VALUE_MAPPING: direction, axis, value = self._INPUT_DPAD_VALUE_MAPPING[event.input] # change the value only if button is pressed on the DPAD if cur_val > 0.5: self._delta_pose_raw[direction, axis] = value self._delta_pose_raw[1 - direction, axis] = 0 else: self._delta_pose_raw[:, axis] = 0 # additional callbacks if event.input in self._additional_callbacks: self._additional_callbacks[event.input]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" # map gamepad input to the element in self._delta_pose_raw # the first index is the direction (0: positive, 1: negative) # the second index is the axis (0: x, 1: y, 2: z, 3: roll, 4: pitch, 5: yaw) # the third index is the sensitivity of the command self._INPUT_STICK_VALUE_MAPPING = { # forward command carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.pos_sensitivity), # backward command carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.pos_sensitivity), # right command carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.pos_sensitivity), # left command carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.pos_sensitivity), # upward command carb.input.GamepadInput.RIGHT_STICK_UP: (0, 2, self.pos_sensitivity), # downward command carb.input.GamepadInput.RIGHT_STICK_DOWN: (1, 2, self.pos_sensitivity), # yaw command (positive) carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 5, self.rot_sensitivity), # yaw command (negative) carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 5, self.rot_sensitivity), } self._INPUT_DPAD_VALUE_MAPPING = { # pitch command (positive) carb.input.GamepadInput.DPAD_UP: (1, 4, self.rot_sensitivity * 0.8), # pitch command (negative) carb.input.GamepadInput.DPAD_DOWN: (0, 4, self.rot_sensitivity * 0.8), # roll command (positive) carb.input.GamepadInput.DPAD_RIGHT: (1, 3, self.rot_sensitivity * 0.8), # roll command (negative) carb.input.GamepadInput.DPAD_LEFT: (0, 3, self.rot_sensitivity * 0.8), } def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray: """Resolves the command buffer. Args: raw_command: The raw command from the gamepad. Shape is (2, 3) This is a 2D array since gamepad dpad/stick returns two values corresponding to the positive and negative direction. The first index is the direction (0: positive, 1: negative) and the second index is value (absolute) of the command. Returns: Resolved command. Shape is (3,) """ # compare the positive and negative value decide the sign of the value # if the positive value is larger, the sign is positive (i.e. False, 0) # if the negative value is larger, the sign is positive (i.e. 
True, 1) delta_command_sign = raw_command[1, :] > raw_command[0, :] # extract the command value delta_command = raw_command.max(axis=0) # apply the sign # if the sign is positive, the value is already positive. # if the sign is negative, the value is negative after applying the sign. delta_command[delta_command_sign] *= -1 return delta_command
size: 11,390 | lang: Python | avg_line_length: 45.493877 | max_line_length: 192 | alphanum_fraction: 0.599034
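Both gamepad classes also zero out small stick deflections with a dead zone before writing into the raw (positive, negative) buffers. Here is a small sketch of that event-handling step; the event names and the toy two-entry mapping are invented stand-ins for real carb.input.GamepadInput events.

import numpy as np

DEAD_ZONE = 0.01
# (direction, axis, sensitivity): direction 0 = positive row, 1 = negative row
STICK_MAPPING = {
    "LEFT_STICK_UP": (0, 0, 1.0),    # +x
    "LEFT_STICK_DOWN": (1, 0, 1.0),  # -x
}

delta_pose_raw = np.zeros((2, 6))  # (positive, negative) x (x, y, z, roll, pitch, yaw)


def on_stick_event(name: str, value: float):
    # ignore tiny deflections below the dead zone
    if abs(value) < DEAD_ZONE:
        value = 0.0
    if name in STICK_MAPPING:
        direction, axis, sensitivity = STICK_MAPPING[name]
        delta_pose_raw[direction, axis] = sensitivity * value


on_stick_event("LEFT_STICK_UP", 0.7)
on_stick_event("LEFT_STICK_DOWN", 0.005)  # below dead zone -> treated as zero
print(delta_pose_raw[:, 0])               # -> [0.7 0. ]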
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Gamepad device for SE(2) and SE(3) control."""

from .se2_gamepad import Se2Gamepad
from .se3_gamepad import Se3Gamepad
size: 246 | lang: Python | avg_line_length: 23.699998 | max_line_length: 56 | alphanum_fraction: 0.743902
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Keyboard device for SE(2) and SE(3) control."""

from .se2_keyboard import Se2Keyboard
from .se3_keyboard import Se3Keyboard
size: 251 | lang: Python | avg_line_length: 24.199998 | max_line_length: 56 | alphanum_fraction: 0.749004
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se2_keyboard.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Keyboard controller for SE(2) control.""" from __future__ import annotations import numpy as np import weakref from collections.abc import Callable import carb import omni from ..device_base import DeviceBase class Se2Keyboard(DeviceBase): r"""A keyboard controller for sending SE(2) commands as velocity commands. This class is designed to provide a keyboard controller for mobile base (such as quadrupeds). It uses the Omniverse keyboard interface to listen to keyboard events and map them to robot's task-space commands. The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`. Key bindings: ====================== ========================= ======================== Command Key (+ve axis) Key (-ve axis) ====================== ========================= ======================== Move along x-axis Numpad 8 / Arrow Up Numpad 2 / Arrow Down Move along y-axis Numpad 4 / Arrow Right Numpad 6 / Arrow Left Rotate along z-axis Numpad 7 / X Numpad 9 / Y ====================== ========================= ======================== .. seealso:: The official documentation for the keyboard interface: `Carb Keyboard Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__. """ def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0): """Initialize the keyboard layer. Args: v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8. v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4. omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0. 
""" # store inputs self.v_x_sensitivity = v_x_sensitivity self.v_y_sensitivity = v_y_sensitivity self.omega_z_sensitivity = omega_z_sensitivity # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._keyboard = self._appwindow.get_keyboard() # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called self._keyboard_sub = self._input.subscribe_to_keyboard_events( self._keyboard, lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args), ) # bindings for keyboard to command self._create_key_bindings() # command buffers self._base_command = np.zeros(3) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Release the keyboard interface.""" self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Keyboard Controller for SE(2): {self.__class__.__name__}\n" msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n" msg += "\t----------------------------------------------\n" msg += "\tReset all commands: L\n" msg += "\tMove forward (along x-axis): Numpad 8 / Arrow Up\n" msg += "\tMove backward (along x-axis): Numpad 2 / Arrow Down\n" msg += "\tMove right (along y-axis): Numpad 4 / Arrow Right\n" msg += "\tMove left (along y-axis): Numpad 6 / Arrow Left\n" msg += "\tYaw positively (along z-axis): Numpad 7 / X\n" msg += "\tYaw negatively (along z-axis): Numpad 9 / Y" return msg """ Operations """ def reset(self): # default flags self._base_command.fill(0.0) def add_callback(self, key: str, func: Callable): """Add additional functions to bind keyboard. A list of available keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__. Args: key: The keyboard button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> np.ndarray: """Provides the result from keyboard event state. Returns: 3D array containing the linear (x,y) and angular velocity (z). """ return self._base_command """ Internal helpers. """ def _on_keyboard_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput """ # apply the command when pressed if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name == "L": self.reset() elif event.input.name in self._INPUT_KEY_MAPPING: self._base_command += self._INPUT_KEY_MAPPING[event.input.name] # remove the command when un-pressed if event.type == carb.input.KeyboardEventType.KEY_RELEASE: if event.input.name in self._INPUT_KEY_MAPPING: self._base_command -= self._INPUT_KEY_MAPPING[event.input.name] # additional callbacks if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._additional_callbacks: self._additional_callbacks[event.input.name]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_KEY_MAPPING = { # forward command "NUMPAD_8": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity, "UP": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity, # back command "NUMPAD_2": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity, "DOWN": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity, # right command "NUMPAD_4": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity, "LEFT": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity, # left command "NUMPAD_6": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity, "RIGHT": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity, # yaw command (positive) "NUMPAD_7": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity, "X": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity, # yaw command (negative) "NUMPAD_9": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity, "Z": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity, }
size: 7,354 | lang: Python | avg_line_length: 42.264706 | max_line_length: 195 | alphanum_fraction: 0.584444
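The keyboard controllers build their command incrementally: a key press adds the corresponding per-axis vector and the matching release subtracts it again, so holding several keys sums their contributions. A small sketch of that accumulation, with a toy two-key mapping whose names and sensitivities are chosen for illustration:

import numpy as np

V_X_SENSITIVITY = 0.8
OMEGA_Z_SENSITIVITY = 1.0
KEY_MAPPING = {
    "UP": np.array([1.0, 0.0, 0.0]) * V_X_SENSITIVITY,     # move forward
    "X": np.array([0.0, 0.0, 1.0]) * OMEGA_Z_SENSITIVITY,  # yaw positively
}

base_command = np.zeros(3)


def on_key(command: np.ndarray, name: str, pressed: bool):
    # add the vector on press, remove it again on release (in-place update)
    if name in KEY_MAPPING:
        command += KEY_MAPPING[name] if pressed else -KEY_MAPPING[name]


on_key(base_command, "UP", True)
on_key(base_command, "X", True)
print(base_command)  # -> [0.8 0.  1. ]
on_key(base_command, "X", False)
print(base_command)  # -> [0.8 0.  0. ]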
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se3_keyboard.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Keyboard controller for SE(3) control.""" import numpy as np import weakref from collections.abc import Callable from scipy.spatial.transform.rotation import Rotation import carb import omni from ..device_base import DeviceBase class Se3Keyboard(DeviceBase): """A keyboard controller for sending SE(3) commands as delta poses and binary command (open/close). This class is designed to provide a keyboard controller for a robotic arm with a gripper. It uses the Omniverse keyboard interface to listen to keyboard events and map them to robot's task-space commands. The command comprises of two parts: * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians. * gripper: a binary command to open or close the gripper. Key bindings: ============================== ================= ================= Description Key (+ve axis) Key (-ve axis) ============================== ================= ================= Toggle gripper (open/close) K Move along x-axis W S Move along y-axis A D Move along z-axis Q E Rotate along x-axis Z X Rotate along y-axis T G Rotate along z-axis C V ============================== ================= ================= .. seealso:: The official documentation for the keyboard interface: `Carb Keyboard Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__. """ def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8): """Initialize the keyboard layer. Args: pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.05. rot_sensitivity: Magnitude of scale input rotation commands scaling. Defaults to 0.5. """ # store inputs self.pos_sensitivity = pos_sensitivity self.rot_sensitivity = rot_sensitivity # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._keyboard = self._appwindow.get_keyboard() # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called. 
self._keyboard_sub = self._input.subscribe_to_keyboard_events( self._keyboard, lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args), ) # bindings for keyboard to command self._create_key_bindings() # command buffers self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Release the keyboard interface.""" self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Keyboard Controller for SE(3): {self.__class__.__name__}\n" msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n" msg += "\t----------------------------------------------\n" msg += "\tToggle gripper (open/close): K\n" msg += "\tMove arm along x-axis: W/S\n" msg += "\tMove arm along y-axis: A/D\n" msg += "\tMove arm along z-axis: Q/E\n" msg += "\tRotate arm along x-axis: Z/X\n" msg += "\tRotate arm along y-axis: T/G\n" msg += "\tRotate arm along z-axis: C/V" return msg """ Operations """ def reset(self): # default flags self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) def add_callback(self, key: str, func: Callable): """Add additional functions to bind keyboard. A list of available keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__. Args: key: The keyboard button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> tuple[np.ndarray, bool]: """Provides the result from keyboard event state. Returns: A tuple containing the delta pose command and gripper commands. """ # convert to rotation vector rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec() # return the command and gripper state return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper """ Internal helpers. """ def _on_keyboard_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput """ # apply the command when pressed if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name == "L": self.reset() if event.input.name == "K": self._close_gripper = not self._close_gripper elif event.input.name in ["W", "S", "A", "D", "Q", "E"]: self._delta_pos += self._INPUT_KEY_MAPPING[event.input.name] elif event.input.name in ["Z", "X", "T", "G", "C", "V"]: self._delta_rot += self._INPUT_KEY_MAPPING[event.input.name] # remove the command when un-pressed if event.type == carb.input.KeyboardEventType.KEY_RELEASE: if event.input.name in ["W", "S", "A", "D", "Q", "E"]: self._delta_pos -= self._INPUT_KEY_MAPPING[event.input.name] elif event.input.name in ["Z", "X", "T", "G", "C", "V"]: self._delta_rot -= self._INPUT_KEY_MAPPING[event.input.name] # additional callbacks if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._additional_callbacks: self._additional_callbacks[event.input.name]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_KEY_MAPPING = { # toggle: gripper command "K": True, # x-axis (forward) "W": np.asarray([1.0, 0.0, 0.0]) * self.pos_sensitivity, "S": np.asarray([-1.0, 0.0, 0.0]) * self.pos_sensitivity, # y-axis (right-left) "D": np.asarray([0.0, 1.0, 0.0]) * self.pos_sensitivity, "A": np.asarray([0.0, -1.0, 0.0]) * self.pos_sensitivity, # z-axis (up-down) "Q": np.asarray([0.0, 0.0, 1.0]) * self.pos_sensitivity, "E": np.asarray([0.0, 0.0, -1.0]) * self.pos_sensitivity, # roll (around x-axis) "Z": np.asarray([1.0, 0.0, 0.0]) * self.rot_sensitivity, "X": np.asarray([-1.0, 0.0, 0.0]) * self.rot_sensitivity, # pitch (around y-axis) "T": np.asarray([0.0, 1.0, 0.0]) * self.rot_sensitivity, "G": np.asarray([0.0, -1.0, 0.0]) * self.rot_sensitivity, # yaw (around z-axis) "C": np.asarray([0.0, 0.0, 1.0]) * self.rot_sensitivity, "V": np.asarray([0.0, 0.0, -1.0]) * self.rot_sensitivity, }
size: 8,140 | lang: Python | avg_line_length: 42.074074 | max_line_length: 195 | alphanum_fraction: 0.561057
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING

from omni.isaac.orbit.utils import configclass

from .sensor_base import SensorBase


@configclass
class SensorBaseCfg:
    """Configuration parameters for a sensor."""

    class_type: type[SensorBase] = MISSING
    """The associated sensor class.

    The class should inherit from :class:`omni.isaac.orbit.sensors.sensor_base.SensorBase`.
    """

    prim_path: str = MISSING
    """Prim path (or expression) to the sensor.

    .. note::
        The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which
        will be replaced with the environment namespace.

        Example: ``{ENV_REGEX_NS}/Robot/sensor`` will be replaced with ``/World/envs/env_.*/Robot/sensor``.
    """

    update_period: float = 0.0
    """Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step)."""

    history_length: int = 0
    """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only
    the current data is stored (no history)."""

    debug_vis: bool = False
    """Whether to visualize the sensor. Defaults to False."""
size: 1,297 | lang: Python | avg_line_length: 27.844444 | max_line_length: 107 | alphanum_fraction: 0.690825
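A concrete sensor would typically extend this base configuration. The sketch below shows a hypothetical setup: the MySensor class, the MySensorCfg name, and the prim path are all invented for illustration; only the field names and the {ENV_REGEX_NS} placeholder come from the configuration above.

from omni.isaac.orbit.utils import configclass

from .sensor_base import SensorBase
from .sensor_base_cfg import SensorBaseCfg


class MySensor(SensorBase):
    ...  # hypothetical sensor implementation


@configclass
class MySensorCfg(SensorBaseCfg):
    """Configuration for the hypothetical MySensor."""

    class_type: type[SensorBase] = MySensor
    prim_path: str = "{ENV_REGEX_NS}/Robot/base/my_sensor"
    update_period: float = 0.02  # refresh at 50 Hz instead of every step
    history_length: int = 3      # keep the last three frames
    debug_vis: bool = True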
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package containing various sensor class implementations.

This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both
USD-based and custom sensors:

* **USD-prim sensors**: Available in Omniverse and require creating a USD prim for them.
  For instance, RTX ray tracing camera and lidar sensors.
* **USD-schema sensors**: Available in Omniverse and require creating a USD schema on an existing prim.
  For instance, contact sensors and frame transformers.
* **Custom sensors**: Implemented in Python and do not require creating any USD prim or schema.
  For instance, warp-based ray-casters.

Due to the above categorization, the prim paths passed to the sensor's configuration class
are interpreted differently based on the sensor type. The following table summarizes the
interpretation of the prim paths for different sensor types:

+---------------------+---------------------------+---------------------------------------------------------------+
| Sensor Type         | Example Prim Path         | Pre-check                                                     |
+=====================+===========================+===============================================================+
| Camera              | /World/robot/base/camera  | Leaf is available, and it will spawn a USD camera             |
+---------------------+---------------------------+---------------------------------------------------------------+
| Contact Sensor      | /World/robot/feet_*       | Leaf is available and checks if the schema exists             |
+---------------------+---------------------------+---------------------------------------------------------------+
| Ray Caster          | /World/robot/base         | Leaf exists and is a physics body (Articulation / Rigid Body) |
+---------------------+---------------------------+---------------------------------------------------------------+
| Frame Transformer   | /World/robot/base         | Leaf exists and is a physics body (Articulation / Rigid Body) |
+---------------------+---------------------------+---------------------------------------------------------------+
"""

from .camera import *  # noqa: F401, F403
from .contact_sensor import *  # noqa: F401, F403
from .frame_transformer import *  # noqa: F401
from .ray_caster import *  # noqa: F401, F403
from .sensor_base import SensorBase  # noqa: F401
from .sensor_base_cfg import SensorBaseCfg  # noqa: F401
size: 2,577 | lang: Python | avg_line_length: 60.380951 | max_line_length: 115 | alphanum_fraction: 0.510671
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Base class for sensors. This class defines an interface for sensors similar to how the :class:`omni.isaac.orbit.robot.robot_base.RobotBase` class works. Each sensor class should inherit from this class and implement the abstract methods. """ from __future__ import annotations import inspect import torch import weakref from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any import omni.kit.app import omni.timeline import omni.isaac.orbit.sim as sim_utils if TYPE_CHECKING: from .sensor_base_cfg import SensorBaseCfg class SensorBase(ABC): """The base class for implementing a sensor. The implementation is based on lazy evaluation. The sensor data is only updated when the user tries accessing the data through the :attr:`data` property or sets ``force_compute=True`` in the :meth:`update` method. This is done to avoid unnecessary computation when the sensor data is not used. The sensor is updated at the specified update period. If the update period is zero, then the sensor is updated at every simulation step. """ def __init__(self, cfg: SensorBaseCfg): """Initialize the sensor class. Args: cfg: The configuration parameters for the sensor. """ # check that config is valid if cfg.history_length < 0: raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}") # store inputs self.cfg = cfg # flag for whether the sensor is initialized self._is_initialized = False # flag for whether the sensor is in visualization mode self._is_visualizing = False # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called. # add callbacks for stage play/stop # The order is set to 10 which is arbitrary but should be lower priority than the default order of 0 timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( int(omni.timeline.TimelineEventType.PLAY), lambda event, obj=weakref.proxy(self): obj._initialize_callback(event), order=10, ) self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( int(omni.timeline.TimelineEventType.STOP), lambda event, obj=weakref.proxy(self): obj._invalidate_initialize_callback(event), order=10, ) # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) self._debug_vis_handle = None # set initial state of debug visualization self.set_debug_vis(self.cfg.debug_vis) def __del__(self): """Unsubscribe from the callbacks.""" # clear physics events handles if self._initialize_handle: self._initialize_handle.unsubscribe() self._initialize_handle = None if self._invalidate_initialize_handle: self._invalidate_initialize_handle.unsubscribe() self._invalidate_initialize_handle = None # clear debug visualization if self._debug_vis_handle: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None """ Properties """ @property def num_instances(self) -> int: """Number of instances of the sensor. This is equal to the number of sensors per environment multiplied by the number of environments. """ return self._num_envs @property def device(self) -> str: """Memory device for computation.""" return self._device @property @abstractmethod def data(self) -> Any: """Data from the sensor. This property is only updated when the user tries to access the data. 
This is done to avoid unnecessary computation when the sensor data is not used. For updating the sensor when this property is accessed, you can use the following code snippet in your sensor implementation: .. code-block:: python # update sensors if needed self._update_outdated_buffers() # return the data (where `_data` is the data for the sensor) return self._data """ raise NotImplementedError @property def has_debug_vis_implementation(self) -> bool: """Whether the sensor has a debug visualization implemented.""" # check if function raises NotImplementedError source_code = inspect.getsource(self._set_debug_vis_impl) return "NotImplementedError" not in source_code """ Operations """ def set_debug_vis(self, debug_vis: bool) -> bool: """Sets whether to visualize the sensor data. Args: debug_vis: Whether to visualize the sensor data. Returns: Whether the debug visualization was successfully set. False if the sensor does not support debug visualization. """ # check if debug visualization is supported if not self.has_debug_vis_implementation: return False # toggle debug visualization objects self._set_debug_vis_impl(debug_vis) # toggle debug visualization flag self._is_visualizing = debug_vis # toggle debug visualization handles if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: app_interface = omni.kit.app.get_app_interface() self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) ) else: # remove the subscriber if it exists if self._debug_vis_handle is not None: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None # return success return True def reset(self, env_ids: Sequence[int] | None = None): """Resets the sensor internals. Args: env_ids: The sensor ids to reset. Defaults to None. """ # Resolve sensor ids if env_ids is None: env_ids = slice(None) # Reset the timestamp for the sensors self._timestamp[env_ids] = 0.0 self._timestamp_last_update[env_ids] = 0.0 # Set all reset sensors to outdated so that they are updated when data is called the next time. self._is_outdated[env_ids] = True def update(self, dt: float, force_recompute: bool = False): # Update the timestamp for the sensors self._timestamp += dt self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period # Update the buffers # TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!? # It is only for the contact sensor but there we should redefine the update function IMO. if force_recompute or self._is_visualizing or (self.cfg.history_length > 0): self._update_outdated_buffers() """ Implementation specific. 
""" @abstractmethod def _initialize_impl(self): """Initializes the sensor-related handles and internal buffers.""" # Obtain Simulation Context sim = sim_utils.SimulationContext.instance() if sim is None: raise RuntimeError("Simulation Context is not initialized!") # Obtain device and backend self._device = sim.device self._backend = sim.backend self._sim_physics_dt = sim.get_physics_dt() # Count number of environments env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0] self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr) self._num_envs = len(self._parent_prims) # Boolean tensor indicating whether the sensor data has to be refreshed self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device) # Current timestamp (in seconds) self._timestamp = torch.zeros(self._num_envs, device=self._device) # Timestamp from last update self._timestamp_last_update = torch.zeros_like(self._timestamp) @abstractmethod def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the sensor data for provided environment ids. This function does not perform any time-based checks and directly fills the data into the data container. Args: env_ids: The indices of the sensors that are ready to capture. """ raise NotImplementedError def _set_debug_vis_impl(self, debug_vis: bool): """Set debug visualization into visualization objects. This function is responsible for creating the visualization objects if they don't exist and input ``debug_vis`` is True. If the visualization objects exist, the function should set their visibility into the stage. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") def _debug_vis_callback(self, event): """Callback for debug visualization. This function calls the visualization objects and sets the data to visualize into them. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") """ Internal simulation callbacks. """ def _initialize_callback(self, event): """Initializes the scene elements. Note: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ if not self._is_initialized: self._initialize_impl() self._is_initialized = True def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" self._is_initialized = False """ Helper functions. """ def _update_outdated_buffers(self): """Fills the sensor data for the outdated sensors.""" outdated_env_ids = self._is_outdated.nonzero().squeeze(-1) if len(outdated_env_ids) > 0: # obtain new data self._update_buffers_impl(outdated_env_ids) # update the timestamp from last update self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids] # set outdated flag to false for the updated sensors self._is_outdated[outdated_env_ids] = False
11,068
Python
37.975352
128
0.644471
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from tensordict import TensorDict from typing import TYPE_CHECKING, ClassVar, Literal import omni.physics.tensors.impl.api as physx from omni.isaac.core.prims import XFormPrimView import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.sensors.camera import CameraData from omni.isaac.orbit.sensors.camera.utils import convert_orientation_convention, create_rotation_matrix_from_view from omni.isaac.orbit.utils.warp import raycast_mesh from .ray_caster import RayCaster if TYPE_CHECKING: from .ray_caster_camera_cfg import RayCasterCameraCfg class RayCasterCamera(RayCaster): """A ray-casting camera sensor. The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are defined in the sensor's local coordinate frame. The sensor has the same interface as the :class:`omni.isaac.orbit.sensors.Camera` that implements the camera class through USD camera prims. However, this class provides a faster image generation. The sensor converts meshes from the list of primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these Warp meshes only. Currently, only the following annotators are supported: - ``"distance_to_camera"``: An image containing the distance to camera optical center. - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - ``"normals"``: An image containing the local surface normal vectors at each pixel. .. note:: Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes is a work in progress. """ cfg: RayCasterCameraCfg """The configuration parameters.""" UNSUPPORTED_TYPES: ClassVar[set[str]] = { "rgb", "instance_id_segmentation", "instance_id_segmentation_fast", "instance_segmentation", "instance_segmentation_fast", "semantic_segmentation", "skeleton_data", "motion_vectors", "bounding_box_2d_tight", "bounding_box_2d_tight_fast", "bounding_box_2d_loose", "bounding_box_2d_loose_fast", "bounding_box_3d", "bounding_box_3d_fast", } """A set of sensor types that are not supported by the ray-caster camera.""" def __init__(self, cfg: RayCasterCameraCfg): """Initializes the camera object. Args: cfg: The configuration parameters. Raises: ValueError: If the provided data types are not supported by the ray-caster camera. 
""" # perform check on supported data types self._check_supported_data_types(cfg) # initialize base class super().__init__(cfg) # create empty variables for storing output data self._data = CameraData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" f"\tview type : {self._view.__class__}\n" f"\tupdate period (s) : {self.cfg.update_period}\n" f"\tnumber of meshes : {len(RayCaster.meshes)}\n" f"\tnumber of sensors : {self._view.count}\n" f"\tnumber of rays/sensor: {self.num_rays}\n" f"\ttotal number of rays : {self.num_rays * self._view.count}\n" f"\timage shape : {self.image_shape}" ) """ Properties """ @property def data(self) -> CameraData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def image_shape(self) -> tuple[int, int]: """A tuple containing (height, width) of the camera sensor.""" return (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) @property def frame(self) -> torch.tensor: """Frame number when the measurement took place.""" return self._frame """ Operations. """ def set_intrinsic_matrices( self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None ): """Set the intrinsic matrix of the camera. Args: matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). focal_length: Focal length to use when computing aperture values. Defaults to 1.0. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. """ # resolve env_ids if env_ids is None: env_ids = slice(None) # save new intrinsic matrices and focal length self._data.intrinsic_matrices[env_ids] = matrices.to(self._device) self._focal_length = focal_length # recompute ray directions self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device ) def reset(self, env_ids: Sequence[int] | None = None): # reset the timestamps super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. pos_w, quat_w = self._compute_camera_world_poses(env_ids) self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w # Reset the frame count self._frame[env_ids] = 0 def set_world_poses( self, positions: torch.Tensor | None = None, orientations: torch.Tensor | None = None, env_ids: Sequence[int] | None = None, convention: Literal["opengl", "ros", "world"] = "ros", ): """Set the pose of the camera w.r.t. the world frame using specified convention. Since different fields use different conventions for camera orientations, the method allows users to set the camera poses in the specified convention. Possible conventions are: - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details on the conventions. Args: positions: The cartesian coordinates (in meters). Shape is (N, 3). Defaults to None, in which case the camera position in not changed. orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). 
                Defaults to None, in which case the camera orientation is not changed.
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
            convention: The convention in which the poses are fed. Defaults to "ros".

        Raises:
            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
        """
        # resolve env_ids
        if env_ids is None:
            env_ids = self._ALL_INDICES
        # get current positions
        pos_w, quat_w = self._compute_view_world_poses(env_ids)
        if positions is not None:
            # transform to camera frame
            pos_offset_world_frame = positions - pos_w
            self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame)
        if orientations is not None:
            # convert rotation matrix from input convention to world
            quat_w_set = convert_orientation_convention(orientations, origin=convention, target="world")
            self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set)
        # update the data
        pos_w, quat_w = self._compute_camera_world_poses(env_ids)
        self._data.pos_w[env_ids] = pos_w
        self._data.quat_w_world[env_ids] = quat_w

    def set_world_poses_from_view(
        self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
    ):
        """Set the poses of the camera from the eye position and look-at target position.

        Args:
            eyes: The positions of the camera's eye. Shape is (N, 3).
            targets: The target locations to look at. Shape is (N, 3).
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.

        Raises:
            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
            NotImplementedError: If the stage up-axis is not "Y" or "Z".
        """
        # camera position and rotation in opengl convention
        orientations = math_utils.quat_from_matrix(
            create_rotation_matrix_from_view(eyes, targets, device=self._device)
        )
        self.set_world_poses(eyes, orientations, env_ids, convention="opengl")

    """
    Implementation.
""" def _initialize_rays_impl(self): # Create all indices buffer self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) # create buffers self._create_buffers() # compute intrinsic matrices self._compute_intrinsic_matrices() # compute ray stars and directions self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device ) self.num_rays = self.ray_directions.shape[1] # create buffer to store ray hits self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) # set offsets quat_w = convert_orientation_convention( torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" ) self._offset_quat = quat_w.repeat(self._view.count, 1) self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" # increment frame count self._frame[env_ids] += 1 # compute poses from current view pos_w, quat_w = self._compute_camera_world_poses(env_ids) # update the data self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w # note: full orientation is considered ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) ray_starts_w += pos_w.unsqueeze(1) ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) # ray cast and store the hits # TODO: Make ray-casting work for multiple meshes? # necessary for regular dictionaries. self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh( ray_starts_w, ray_directions_w, mesh=RayCasterCamera.meshes[self.cfg.mesh_prim_paths[0]], max_dist=self.cfg.max_distance, return_distance=any( [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] ), return_normal="normals" in self.cfg.data_types, ) # update output buffers if "distance_to_image_plane" in self.cfg.data_types: # note: data is in camera frame so we only take the first component (z-axis of camera frame) distance_to_image_plane = ( math_utils.quat_apply( math_utils.quat_inv(quat_w).repeat(1, self.num_rays), (ray_depth[:, :, None] * ray_directions_w), ) )[:, :, 0] self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view(-1, *self.image_shape) if "distance_to_camera" in self.cfg.data_types: self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape) if "normals" in self.cfg.data_types: self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) def _debug_vis_callback(self, event): # in case it crashes be safe if not hasattr(self, "ray_hits_w"): return # show ray hit positions self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3)) """ Private Helpers """ def _check_supported_data_types(self, cfg: RayCasterCameraCfg): """Checks if the data types are supported by the ray-caster camera.""" # check if there is any intersection in unsupported types # reason: we cannot obtain this data from simplified warp-based ray caster common_elements = set(cfg.data_types) & RayCasterCamera.UNSUPPORTED_TYPES if common_elements: raise ValueError( f"RayCasterCamera class does not support the following sensor types: {common_elements}." 
"\n\tThis is because these sensor types cannot be obtained in a fast way using ''warp''." "\n\tHint: If you need to work with these sensor types, we recommend using the USD camera" " interface from the omni.isaac.orbit.sensors.camera module." ) def _create_buffers(self): """Create buffers for storing data.""" # prepare drift self.drift = torch.zeros(self._view.count, 3, device=self.device) # create the data object # -- pose of the cameras self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) # -- intrinsic matrix self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) self._data.intrinsic_matrices[:, 2, 2] = 1.0 self._data.image_shape = self.image_shape # -- output data # create the buffers to store the annotator data. self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device) self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count for name in self.cfg.data_types: if name in ["distance_to_image_plane", "distance_to_camera"]: shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) elif name in ["normals"]: shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 3) else: raise ValueError(f"Received unknown data type: {name}. Please check the configuration.") # allocate tensor to store the data self._data.output[name] = torch.zeros((self._view.count, *shape), device=self._device) def _compute_intrinsic_matrices(self): """Computes the intrinsic matrices for the camera based on the config provided.""" # get the sensor properties pattern_cfg = self.cfg.pattern_cfg # compute the intrinsic matrix vertical_aperture = pattern_cfg.horizontal_aperture * pattern_cfg.height / pattern_cfg.width f_x = pattern_cfg.width * pattern_cfg.focal_length / pattern_cfg.horizontal_aperture f_y = pattern_cfg.height * pattern_cfg.focal_length / vertical_aperture c_x = pattern_cfg.horizontal_aperture_offset * f_x + pattern_cfg.width / 2 c_y = pattern_cfg.vertical_aperture_offset * f_y + pattern_cfg.height / 2 # allocate the intrinsic matrices self._data.intrinsic_matrices[:, 0, 0] = f_x self._data.intrinsic_matrices[:, 0, 2] = c_x self._data.intrinsic_matrices[:, 1, 1] = f_y self._data.intrinsic_matrices[:, 1, 2] = c_y # save focal length self._focal_length = pattern_cfg.focal_length def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: """Obtains the pose of the view the camera is attached to in the world frame. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z). """ # obtain the poses of the sensors # note: clone arg doesn't exist for xform prim view so we need to do this manually if isinstance(self._view, XFormPrimView): pos_w, quat_w = self._view.get_world_poses(env_ids) elif isinstance(self._view, physx.ArticulationView): pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1) quat_w = math_utils.convert_quat(quat_w, to="wxyz") elif isinstance(self._view, physx.RigidBodyView): pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) quat_w = math_utils.convert_quat(quat_w, to="wxyz") else: raise RuntimeError(f"Unsupported view type: {type(self._view)}") # return the pose return pos_w.clone(), quat_w.clone() def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: """Computes the pose of the camera in the world frame. 
This function applies the offset pose to the pose of the view the camera is attached to. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention. """ # get the pose of the view the camera is attached to pos_w, quat_w = self._compute_view_world_poses(env_ids) # apply offsets # need to apply quat because offset relative to parent frame pos_w += math_utils.quat_apply(quat_w, self._offset_pos[env_ids]) quat_w = math_utils.quat_mul(quat_w, self._offset_quat[env_ids]) return pos_w, quat_w
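# -- Example (illustrative sketch, not part of the framework) -----------------
# A possible configuration of this sensor with a pinhole pattern, using
# hypothetical prim paths. Kept as a comment so that importing this module is
# unaffected; it assumes the simulation is playing and `update(dt)` is called.
#
#   from omni.isaac.orbit.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns
#
#   camera_cfg = RayCasterCameraCfg(
#       prim_path="/World/envs/env_.*/Robot/base",   # hypothetical parent prim
#       mesh_prim_paths=["/World/ground"],           # hypothetical static mesh to ray-cast against
#       pattern_cfg=patterns.PinholeCameraPatternCfg(focal_length=24.0, width=128, height=96),
#       data_types=["distance_to_image_plane"],
#       offset=RayCasterCameraCfg.OffsetCfg(pos=(0.2, 0.0, 0.1), convention="ros"),
#   )
#   camera = RayCasterCamera(cfg=camera_cfg)
#   # ... once the timeline is playing and camera.update(dt) has been called:
#   depth = camera.data.output["distance_to_image_plane"]   # shape: (num_envs, 96, 128)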
18,416
Python
45.0425
120
0.627987
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from dataclasses import dataclass @dataclass class RayCasterData: """Data container for the ray-cast sensor.""" pos_w: torch.Tensor = None """Position of the sensor origin in world frame. Shape is (N, 3), where N is the number of sensors. """ quat_w: torch.Tensor = None """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4), where N is the number of sensors. """ ray_hits_w: torch.Tensor = None """The ray hit positions in the world frame. Shape is (N, B, 3), where N is the number of sensors, B is the number of rays in the scan pattern per sensor. """
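# -- Example (illustrative sketch) --------------------------------------------
# For a downward-facing grid pattern, a height scan relative to the sensor origin
# can be computed from this container (``sensor`` is a hypothetical RayCaster) as
#
#   heights = sensor.data.pos_w[:, 2].unsqueeze(1) - sensor.data.ray_hits_w[..., 2]
#
# which broadcasts the (N, 1) sensor heights against the (N, B) hit heights.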
830
Python
24.968749
82
0.668675
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for Warp-based ray-cast sensor.""" from . import patterns from .ray_caster import RayCaster from .ray_caster_camera import RayCasterCamera from .ray_caster_camera_cfg import RayCasterCameraCfg from .ray_caster_cfg import RayCasterCfg from .ray_caster_data import RayCasterData
415
Python
28.714284
56
0.787952
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import numpy as np
import re
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING, ClassVar

import carb
import omni.physics.tensors.impl.api as physx
import warp as wp
from omni.isaac.core.prims import XFormPrimView
from pxr import UsdGeom, UsdPhysics

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.terrains.trimesh.utils import make_plane
from omni.isaac.orbit.utils.math import convert_quat, quat_apply, quat_apply_yaw
from omni.isaac.orbit.utils.warp import convert_to_warp_mesh, raycast_mesh

from ..sensor_base import SensorBase
from .ray_caster_data import RayCasterData

if TYPE_CHECKING:
    from .ray_caster_cfg import RayCasterCfg


class RayCaster(SensorBase):
    """A ray-casting sensor.

    The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are
    defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against
    a set of meshes with a given ray pattern.

    The meshes are parsed from the list of primitive paths provided in the configuration. These are then
    converted to warp meshes and stored in the :attr:`meshes` dictionary. The ray-caster then ray-casts against
    these warp meshes using the ray pattern provided in the configuration.

    .. note::
        Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes
        is a work in progress.
    """

    cfg: RayCasterCfg
    """The configuration parameters."""

    meshes: ClassVar[dict[str, wp.Mesh]] = {}
    """The warp meshes available for raycasting.

    The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.

    Note:
        We store a global dictionary of all warp meshes to prevent re-loading the mesh for different ray-cast sensor instances.
    """

    def __init__(self, cfg: RayCasterCfg):
        """Initializes the ray-caster object.

        Args:
            cfg: The configuration parameters.
        """
        # check if sensor path is valid
        # note: currently we do not handle environment indices if there is a regex pattern in the leaf
        #   For example, if the prim path is "/World/Sensor_[1,2]".
        sensor_path = cfg.prim_path.split("/")[-1]
        sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None
        if sensor_path_is_regex:
            raise RuntimeError(
                f"Invalid prim path for the ray-caster sensor: {cfg.prim_path}."
                "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf."
            )
        # Initialize base class
        super().__init__(cfg)
        # Create empty variables for storing output data
        self._data = RayCasterData()

    def __str__(self) -> str:
        """Returns: A string containing information about the instance."""
        return (
            f"Ray-caster @ '{self.cfg.prim_path}': \n"
            f"\tview type            : {self._view.__class__}\n"
            f"\tupdate period (s)    : {self.cfg.update_period}\n"
            f"\tnumber of meshes     : {len(RayCaster.meshes)}\n"
            f"\tnumber of sensors    : {self._view.count}\n"
            f"\tnumber of rays/sensor: {self.num_rays}\n"
            f"\ttotal number of rays : {self.num_rays * self._view.count}"
        )

    """
    Properties
    """

    @property
    def num_instances(self) -> int:
        return self._view.count

    @property
    def data(self) -> RayCasterData:
        # update sensors if needed
        self._update_outdated_buffers()
        # return the data
        return self._data

    """
    Operations.
""" def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # resample the drift self.drift[env_ids].uniform_(*self.cfg.drift_range) """ Implementation. """ def _initialize_impl(self): super()._initialize_impl() # create simulation view self._physics_sim_view = physx.create_simulation_view(self._backend) self._physics_sim_view.set_subspace_roots("/") # check if the prim at path is an articulated or rigid prim # we do this since for physics-based view classes we can access their data directly # otherwise we need to use the xform view class which is slower found_supported_prim_class = False prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) if prim is None: raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") # create view based on the type of prim if prim.HasAPI(UsdPhysics.ArticulationRootAPI): self._view = self._physics_sim_view.create_articulation_view(self.cfg.prim_path.replace(".*", "*")) found_supported_prim_class = True elif prim.HasAPI(UsdPhysics.RigidBodyAPI): self._view = self._physics_sim_view.create_rigid_body_view(self.cfg.prim_path.replace(".*", "*")) found_supported_prim_class = True else: self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False) found_supported_prim_class = True carb.log_warn(f"The prim at path {prim.GetPath().pathString} is not a physics prim! Using XFormPrimView.") # check if prim view class is found if not found_supported_prim_class: raise RuntimeError(f"Failed to find a valid prim view class for the prim paths: {self.cfg.prim_path}") # load the meshes by parsing the stage self._initialize_warp_meshes() # initialize the ray start and directions self._initialize_rays_impl() def _initialize_warp_meshes(self): # check number of mesh prims provided if len(self.cfg.mesh_prim_paths) != 1: raise NotImplementedError( f"RayCaster currently only supports one mesh prim. Received: {len(self.cfg.mesh_prim_paths)}" ) # read prims to ray-cast for mesh_prim_path in self.cfg.mesh_prim_paths: # check if mesh already casted into warp mesh if mesh_prim_path in RayCaster.meshes: continue # check if the prim is a plane - handle PhysX plane as a special case # if a plane exists then we need to create an infinite mesh that is a plane mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane" ) # if we did not find a plane then we need to read the mesh if mesh_prim is None: # obtain the mesh prim mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh" ) # check if valid if mesh_prim is None or not mesh_prim.IsValid(): raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}") # cast into UsdGeomMesh mesh_prim = UsdGeom.Mesh(mesh_prim) # read the vertices and faces points = np.asarray(mesh_prim.GetPointsAttr().Get()) indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get()) wp_mesh = convert_to_warp_mesh(points, indices, device=self.device) # print info carb.log_info( f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces." 
) else: mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True) wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device) # print info carb.log_info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.") # add the warp mesh to the list RayCaster.meshes[mesh_prim_path] = wp_mesh # throw an error if no meshes are found if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]): raise RuntimeError( f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" ) def _initialize_rays_impl(self): # compute ray stars and directions self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device) self.num_rays = len(self.ray_directions) # apply offset transformation to the rays offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device) offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device) self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions) self.ray_starts += offset_pos # repeat the rays for each sensor self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1) self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1) # prepare drift self.drift = torch.zeros(self._view.count, 3, device=self.device) # fill the data buffer self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" # obtain the poses of the sensors if isinstance(self._view, XFormPrimView): pos_w, quat_w = self._view.get_world_poses(env_ids) elif isinstance(self._view, physx.ArticulationView): pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1) quat_w = convert_quat(quat_w, to="wxyz") elif isinstance(self._view, physx.RigidBodyView): pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) quat_w = convert_quat(quat_w, to="wxyz") else: raise RuntimeError(f"Unsupported view type: {type(self._view)}") # note: we clone here because we are read-only operations pos_w = pos_w.clone() quat_w = quat_w.clone() # apply drift pos_w += self.drift[env_ids] # store the poses self._data.pos_w[env_ids] = pos_w self._data.quat_w[env_ids] = quat_w # ray cast based on the sensor poses if self.cfg.attach_yaw_only: # only yaw orientation is considered and directions are not rotated ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) ray_starts_w += pos_w.unsqueeze(1) ray_directions_w = self.ray_directions[env_ids] else: # full orientation is considered ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) ray_starts_w += pos_w.unsqueeze(1) ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) # ray cast and store the hits # TODO: Make this work for multiple meshes? self._data.ray_hits_w[env_ids] = raycast_mesh( ray_starts_w, ray_directions_w, max_dist=self.cfg.max_distance, mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], )[0] def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers # note: parent only deals with callbacks. 
not their visibility if debug_vis: if not hasattr(self, "ray_visualizer"): self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) # set their visibility to true self.ray_visualizer.set_visibility(True) else: if hasattr(self, "ray_visualizer"): self.ray_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # show ray hit positions self.ray_visualizer.visualize(self._data.ray_hits_w.view(-1, 3)) """ Internal simulation callbacks. """ def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._physics_sim_view = None self._view = None
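# -- Example (illustrative sketch, not part of the framework) -----------------
# A possible height-scanner configuration using a grid pattern and hypothetical
# prim paths. Kept as a comment so that importing this module is unaffected.
#
#   from omni.isaac.orbit.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
#
#   scanner_cfg = RayCasterCfg(
#       prim_path="/World/envs/env_.*/Robot/base",   # hypothetical robot base prim
#       mesh_prim_paths=["/World/ground"],           # hypothetical static terrain mesh
#       pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
#       attach_yaw_only=True,   # rays stay vertical regardless of the base roll/pitch
#       max_distance=10.0,
#   )
#   scanner = RayCaster(cfg=scanner_cfg)
#   # ... once the timeline is playing and scanner.update(dt) has been called:
#   hits = scanner.data.ray_hits_w   # shape: (num_envs, num_rays, 3)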
12,994
Python
42.902027
130
0.621133
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration for the ray-cast sensor.""" from __future__ import annotations from typing import Literal from omni.isaac.orbit.utils import configclass from .ray_caster_camera import RayCasterCamera from .ray_caster_cfg import RayCasterCfg @configclass class RayCasterCameraCfg(RayCasterCfg): """Configuration for the ray-cast sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention. - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. """ class_type: type = RayCasterCamera offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" data_types: list[str] = ["distance_to_image_plane"] """List of sensor names/types to enable for the camera. Defaults to ["distance_to_image_plane"].""" def __post_init__(self): # for cameras, this quantity should be False always. self.attach_yaw_only = False
1,885
Python
35.26923
122
0.637135
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration for the ray-cast sensor.""" from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.markers import VisualizationMarkersCfg from omni.isaac.orbit.markers.config import RAY_CASTER_MARKER_CFG from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .patterns.patterns_cfg import PatternBaseCfg from .ray_caster import RayCaster @configclass class RayCasterCfg(SensorBaseCfg): """Configuration for the ray-cast sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" class_type: type = RayCaster mesh_prim_paths: list[str] = MISSING """The list of mesh primitive paths to ray cast against. Note: Currently, only a single static mesh is supported. We are working on supporting multiple static meshes and dynamic meshes. """ offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" attach_yaw_only: bool = MISSING """Whether the rays' starting positions and directions only track the yaw orientation. This is useful for ray-casting height maps, where only yaw rotation is needed. """ pattern_cfg: PatternBaseCfg = MISSING """The pattern that defines the local ray starting positions and directions.""" max_distance: float = 1e6 """Maximum distance (in meters) from the sensor to ray cast to. Defaults to 1e6.""" drift_range: tuple[float, float] = (0.0, 0.0) """The range of drift (in meters) to add to the ray starting positions (xyz). Defaults to (0.0, 0.0). For floating base robots, this is useful for simulating drift in the robot's pose estimation. """ visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
2,541
Python
34.802816
107
0.696576
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING if TYPE_CHECKING: from . import patterns_cfg def grid_pattern(cfg: patterns_cfg.GridPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: """A regular grid pattern for ray casting. The grid pattern is made from rays that are parallel to each other. They span a 2D grid in the sensor's local coordinates from ``(-length/2, -width/2)`` to ``(length/2, width/2)``, which is defined by the ``size = (length, width)`` and ``resolution`` parameters in the config. Args: cfg: The configuration instance for the pattern. device: The device to create the pattern on. Returns: The starting positions and directions of the rays. Raises: ValueError: If the ordering is not "xy" or "yx". ValueError: If the resolution is less than or equal to 0. """ # check valid arguments if cfg.ordering not in ["xy", "yx"]: raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{cfg.ordering}'.") if cfg.resolution <= 0: raise ValueError(f"Resolution must be greater than 0. Received: '{cfg.resolution}'.") # resolve mesh grid indexing (note: torch meshgrid is different from numpy meshgrid) # check: https://github.com/pytorch/pytorch/issues/15301 indexing = cfg.ordering if cfg.ordering == "xy" else "ij" # define grid pattern x = torch.arange(start=-cfg.size[0] / 2, end=cfg.size[0] / 2 + 1.0e-9, step=cfg.resolution, device=device) y = torch.arange(start=-cfg.size[1] / 2, end=cfg.size[1] / 2 + 1.0e-9, step=cfg.resolution, device=device) grid_x, grid_y = torch.meshgrid(x, y, indexing=indexing) # store into ray starts num_rays = grid_x.numel() ray_starts = torch.zeros(num_rays, 3, device=device) ray_starts[:, 0] = grid_x.flatten() ray_starts[:, 1] = grid_y.flatten() # define ray-cast directions ray_directions = torch.zeros_like(ray_starts) ray_directions[..., :] = torch.tensor(list(cfg.direction), device=device) return ray_starts, ray_directions def pinhole_camera_pattern( cfg: patterns_cfg.PinholeCameraPatternCfg, intrinsic_matrices: torch.Tensor, device: str ) -> tuple[torch.Tensor, torch.Tensor]: """The image pattern for ray casting. .. caution:: This function does not follow the standard pattern interface. It requires the intrinsic matrices of the cameras to be passed in. This is because we want to be able to randomize the intrinsic matrices of the cameras, which is not possible with the standard pattern interface. Args: cfg: The configuration instance for the pattern. intrinsic_matrices: The intrinsic matrices of the cameras. Shape is (N, 3, 3). device: The device to create the pattern on. Returns: The starting positions and directions of the rays. The shape of the tensors are (N, H * W, 3) and (N, H * W, 3) respectively. 
""" # get image plane mesh grid grid = torch.meshgrid( torch.arange(start=0, end=cfg.width, dtype=torch.int32, device=device), torch.arange(start=0, end=cfg.height, dtype=torch.int32, device=device), indexing="xy", ) pixels = torch.vstack(list(map(torch.ravel, grid))).T # convert to homogeneous coordinate system pixels = torch.hstack([pixels, torch.ones((len(pixels), 1), device=device)]) # get pixel coordinates in camera frame pix_in_cam_frame = torch.matmul(torch.inverse(intrinsic_matrices), pixels.T) # robotics camera frame is (x forward, y left, z up) from camera frame with (x right, y down, z forward) # transform to robotics camera frame transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2) pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec # normalize ray directions ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) # for camera, we always ray-cast from the sensor's origin ray_starts = torch.zeros_like(ray_directions, device=device) return ray_starts, ray_directions def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: """The RS-Bpearl pattern for ray casting. The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide field of view. It is designed for near-field blind-spots detection. .. _Robosense RS-Bpearl: https://www.roscomponents.com/en/lidar-laser-scanner/267-rs-bpearl.html Args: cfg: The configuration instance for the pattern. device: The device to create the pattern on. Returns: The starting positions and directions of the rays. """ h = torch.arange(-cfg.horizontal_fov / 2, cfg.horizontal_fov / 2, cfg.horizontal_res, device=device) v = torch.tensor(list(cfg.vertical_ray_angles), device=device) pitch, yaw = torch.meshgrid(v, h, indexing="xy") pitch, yaw = torch.deg2rad(pitch.reshape(-1)), torch.deg2rad(yaw.reshape(-1)) pitch += torch.pi / 2 x = torch.sin(pitch) * torch.cos(yaw) y = torch.sin(pitch) * torch.sin(yaw) z = torch.cos(pitch) ray_directions = -torch.stack([x, y, z], dim=1) ray_starts = torch.zeros_like(ray_directions) return ray_starts, ray_directions
5,519
Python
41.137404
110
0.67784
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for ray-casting patterns used by the ray-caster.""" from .patterns import bpearl_pattern, grid_pattern, pinhole_camera_pattern from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg
365
Python
35.599996
99
0.791781
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the ray-cast sensor."""

from __future__ import annotations

import torch
from collections.abc import Callable, Sequence
from dataclasses import MISSING
from typing import Literal

from omni.isaac.orbit.utils import configclass

from . import patterns


@configclass
class PatternBaseCfg:
    """Base configuration for a pattern."""

    func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING
    """Function to generate the pattern.

    The function should take in the configuration and the device name as arguments. It should return
    the pattern's starting positions and directions as a tuple of torch.Tensor.
    """


@configclass
class GridPatternCfg(PatternBaseCfg):
    """Configuration for the grid pattern for ray-casting.

    Defines a 2D grid of rays in the coordinates of the sensor.

    .. attention::
        The points are ordered based on the :attr:`ordering` attribute.

    """

    func: Callable = patterns.grid_pattern

    resolution: float = MISSING
    """Grid resolution (in meters)."""

    size: tuple[float, float] = MISSING
    """Grid size (length, width) (in meters)."""

    direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
    """Ray direction. Defaults to (0.0, 0.0, -1.0)."""

    ordering: Literal["xy", "yx"] = "xy"
    """Specifies the ordering of points in the generated grid. Defaults to ``"xy"``.

    Consider a grid pattern with points at :math:`(x, y)` where :math:`x` and :math:`y` are the grid indices.
    The ordering of the points can be specified as "xy" or "yx". This determines the outer and inner loop order
    when iterating over the grid points.

    * If *"xy"* is selected, the points are ordered with outer loop over "x" and inner loop over "y".
    * If *"yx"* is selected, the points are ordered with outer loop over "y" and inner loop over "x".

    For example, the grid pattern points with :math:`X = (0, 1, 2)` and :math:`Y = (3, 4)`:

    * *"xy"* ordering: :math:`[(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)]`
    * *"yx"* ordering: :math:`[(0, 3), (1, 3), (2, 3), (0, 4), (1, 4), (2, 4)]`
    """


@configclass
class PinholeCameraPatternCfg(PatternBaseCfg):
    """Configuration for a pinhole camera depth image pattern for ray-casting."""

    func: Callable = patterns.pinhole_camera_pattern

    focal_length: float = 24.0
    """Perspective focal length (in cm). Defaults to 24.0cm.

    Longer lens lengths give a narrower FOV; shorter lens lengths give a wider FOV.
    """

    horizontal_aperture: float = 20.955
    """Horizontal aperture (in mm). Defaults to 20.955mm.

    Emulates sensor/film width on a camera.

    Note:
        The default value is the horizontal aperture of a 35 mm spherical projector.
    """

    horizontal_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate horizontally. Defaults to 0.0."""

    vertical_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate vertically. Defaults to 0.0."""

    width: int = MISSING
    """Width of the image (in pixels)."""

    height: int = MISSING
    """Height of the image (in pixels)."""


@configclass
class BpearlPatternCfg(PatternBaseCfg):
    """Configuration for the Bpearl pattern for ray-casting."""

    func: Callable = patterns.bpearl_pattern

    horizontal_fov: float = 360.0
    """Horizontal field of view (in degrees). Defaults to 360.0."""

    horizontal_res: float = 10.0
    """Horizontal resolution (in degrees).
Defaults to 10.0.""" # fmt: off vertical_ray_angles: Sequence[float] = [ 89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375, 58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375, 27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125 ] # fmt: on """Vertical ray angles (in degrees). Defaults to a list of 32 angles. Note: We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles are not evenly spaced. """
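# -- Example (illustrative arithmetic) ----------------------------------------
# With the pinhole defaults above (focal_length=24.0, horizontal_aperture=20.955)
# and an image of width=640, height=480 pixels, the ray-caster camera derives the
# intrinsics (see RayCasterCamera._compute_intrinsic_matrices) roughly as:
#
#   f_x = width * focal_length / horizontal_aperture = 640 * 24.0 / 20.955 ~= 733.0 px
#   f_y = f_x   (the vertical aperture is scaled by height / width)
#   c_x = width / 2 = 320 px,  c_y = height / 2 = 240 px   (zero aperture offsets)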
4,172
Python
32.384
111
0.659396
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import carb import omni.physics.tensors.impl.api as physx from pxr import UsdPhysics import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.utils.math import ( combine_frame_transforms, convert_quat, is_identity_pose, subtract_frame_transforms, ) from ..sensor_base import SensorBase from .frame_transformer_data import FrameTransformerData if TYPE_CHECKING: from .frame_transformer_cfg import FrameTransformerCfg class FrameTransformer(SensorBase): """A sensor for reporting frame transforms. This class provides an interface for reporting the transform of one or more frames (target frames) with respect to another frame (source frame). The source frame is specified by the user as a prim path (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of prim paths (:attr:`FrameTransformerCfg.target_frames`). The source frame and target frames are assumed to be rigid bodies. The transform of the target frames with respect to the source frame is computed by first extracting the transform of the source frame and target frames from the physics engine and then computing the relative transform between the two. Additionally, the user can specify an offset for the source frame and each target frame. This is useful for specifying the transform of the desired frame with respect to the body's center of mass, for instance. A common example of using this sensor is to track the position and orientation of the end effector of a robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the manipulator. .. note:: Currently, this implementation only handles frames within an articulation. This is because the frame regex expressions are resolved based on their parent prim path. This can be extended to handle frames outside of articulation by using the frame prim path instead. However, this would require additional checks to ensure that the user-specified frames are valid which is not currently implemented. .. warning:: The implementation assumes that the parent body of a target frame is not the same as that of the source frame (i.e. :attr:`FrameTransformerCfg.prim_path`). While a corner case, this can occur if the user specifies the same prim path for both the source frame and target frame. In this case, the target frame will be ignored and not reported. This is a limitation of the current implementation and will be fixed in a future release. """ cfg: FrameTransformerCfg """The configuration parameters.""" def __init__(self, cfg: FrameTransformerCfg): """Initializes the frame transformer object. Args: cfg: The configuration parameters. 
""" # initialize base class super().__init__(cfg) # Create empty variables for storing output data self._data: FrameTransformerData = FrameTransformerData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"FrameTransformer @ '{self.cfg.prim_path}': \n" f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" f"\tnumber of envs: {self._num_envs}\n" f"\tsource body frame: {self._source_frame_body_name}\n" f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" ) """ Properties """ @property def data(self) -> FrameTransformerData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = ... """ Implementation. """ def _initialize_impl(self): super()._initialize_impl() # resolve source frame offset source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is # not the identity quaternion for efficiency in _update_buffer_impl self._apply_source_frame_offset = True # Handle source frame offsets if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): carb.log_verbose(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") self._apply_source_frame_offset = False else: carb.log_verbose(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) # Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames # based upon the same body name and we don't want to create unnecessary views body_names_to_frames: dict[str, set[str]] = {} # The offsets associated with each target frame target_offsets: dict[str, dict[str, torch.Tensor]] = {} # The frames whose offsets are not identity non_identity_offset_frames: list[str] = [] # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl self._apply_target_frame_offset = False # Collect all target frames, their associated body prim paths and their offsets so that we can extract # the prim, check that it has the appropriate rigid body API in a single loop. 
# First element is None because user can't specify source frame name frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] # First element is None because source frame offset is handled separately frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets): # Find correct prim matching_prims = sim_utils.find_matching_prims(prim_path) if len(matching_prims) == 0: raise ValueError( f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." " No matching prims were found." ) for prim in matching_prims: # Get the prim path of the matching prim matching_prim_path = prim.GetPath().pathString # Check if it is a rigid prim if not prim.HasAPI(UsdPhysics.RigidBodyAPI): raise ValueError( f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" " rigid body. The class only supports transformations between rigid bodies." ) # Get the name of the body body_name = matching_prim_path.rsplit("/", 1)[-1] # Use body name if frame isn't specified by user frame_name = frame if frame is not None else body_name # Keep track of which frames are associated with which bodies if body_name in body_names_to_frames: body_names_to_frames[body_name].add(frame_name) else: body_names_to_frames[body_name] = {frame_name} if offset is not None: offset_pos = torch.tensor(offset.pos, device=self.device) offset_quat = torch.tensor(offset.rot, device=self.device) # Check if we need to apply offsets (optimized code path in _update_buffer_impl) if not is_identity_pose(offset_pos, offset_quat): non_identity_offset_frames.append(frame_name) self._apply_target_frame_offset = True target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} if not self._apply_target_frame_offset: carb.log_info( f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" f" are identity: {frames[1:]}" ) else: carb.log_info( f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" f" {non_identity_offset_frames}" ) # The names of bodies that RigidPrimView will be tracking to later extract transforms from tracked_body_names = list(body_names_to_frames.keys()) # Construct regex expression for the body names body_names_regex = r"(" + "|".join(tracked_body_names) + r")" body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" # Create simulation view self._physics_sim_view = physx.create_simulation_view(self._backend) self._physics_sim_view.set_subspace_roots("/") # Create a prim view for all frames and initialize it # order of transforms coming out of view will be source frame followed by target frame(s) self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*")) # Determine the order in which regex evaluated body names so we can later index into frame transforms # by frame name correctly all_prim_paths = self._frame_physx_view.prim_paths # Only need first env as the names and their ordering are the same across environments first_env_prim_paths = all_prim_paths[0 : len(tracked_body_names)] first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths] # Re-parse the list as it may have moved when resolving regex above # -- source frame self._source_frame_body_name = 
self.cfg.prim_path.split("/")[-1]
        source_frame_index = first_env_body_names.index(self._source_frame_body_name)
        # -- target frames
        self._target_frame_body_names = first_env_body_names[:]
        self._target_frame_body_names.remove(self._source_frame_body_name)

        # Determine indices into all tracked body frames for both source and target frames
        all_ids = torch.arange(self._num_envs * len(tracked_body_names))
        self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index
        self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)]

        # The name of each of the target frame(s) - either user specified or defaulted to the body name
        self._target_frame_names: list[str] = []
        # The position and rotation components of target frame offsets
        target_frame_offset_pos = []
        target_frame_offset_quat = []
        # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed
        # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations
        # when updating sensor in _update_buffers_impl
        duplicate_frame_indices = []

        # Go through each body name and determine the number of duplicates we need for that frame
        # and extract the offsets. This is all done to handle the case where multiple frames
        # reference the same body, but have different names and/or offsets
        for i, body_name in enumerate(self._target_frame_body_names):
            for frame in body_names_to_frames[body_name]:
                target_frame_offset_pos.append(target_offsets[frame]["pos"])
                target_frame_offset_quat.append(target_offsets[frame]["quat"])
                self._target_frame_names.append(frame)
                duplicate_frame_indices.append(i)

        # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes
        # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is an optimization to make _update_buffers_impl more efficient
        duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device)
        num_target_body_frames = len(tracked_body_names) - 1
        self._duplicate_frame_indices = torch.cat(
            [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)]
        )

        # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4)
        self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1)
        self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1)

        # fill the data buffer
        self._data.target_frame_names = self._target_frame_names
        self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device)
        self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device)
        self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device)
        self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device)
        self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w)
        self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w)

    def _update_buffers_impl(self, env_ids: Sequence[int]):
        """Fills the buffers of the sensor data."""
        # default to all sensors
        if len(env_ids) == self._num_envs:
            env_ids = ...
# Extract transforms from view - shape is: # (the total number of source and target body frames being tracked * self._num_envs, 7) transforms = self._frame_physx_view.get_transforms() # Convert quaternions as PhysX uses xyzw form transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") # Process source frame transform source_frames = transforms[self._source_frame_body_ids] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_source_frame_offset: source_pos_w, source_quat_w = combine_frame_transforms( source_frames[:, :3], source_frames[:, 3:], self._source_frame_offset_pos, self._source_frame_offset_quat, ) else: source_pos_w = source_frames[:, :3] source_quat_w = source_frames[:, 3:] # Process target frame transforms target_frames = transforms[self._target_frame_body_ids] duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_target_frame_offset: target_pos_w, target_quat_w = combine_frame_transforms( duplicated_target_frame_pos_w, duplicated_target_frame_quat_w, self._target_frame_offset_pos, self._target_frame_offset_quat, ) else: target_pos_w = duplicated_target_frame_pos_w target_quat_w = duplicated_target_frame_quat_w # Compute the transform of the target frame with respect to the source frame total_num_frames = len(self._target_frame_names) target_pos_source, target_quat_source = subtract_frame_transforms( source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), target_pos_w, target_quat_w, ) # Update buffers # note: The frame names / ordering don't change so no need to update them after initialization self._data.source_pos_w[:] = source_pos_w.view(-1, 3) self._data.source_quat_w[:] = source_quat_w.view(-1, 4) self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers # note: parent only deals with callbacks. not their visibility if debug_vis: if not hasattr(self, "frame_visualizer"): self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) # set their visibility to true self.frame_visualizer.set_visibility(True) else: if hasattr(self, "frame_visualizer"): self.frame_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # Update the visualized markers if self.frame_visualizer is not None: self.frame_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_quat_w.view(-1, 4)) """ Internal simulation callbacks. """ def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._physics_sim_view = None self._frame_physx_view = None
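# Example usage (illustrative sketch, not part of the library API): configuring a frame
# transformer that reports an end-effector pose relative to a robot base. The prim paths,
# frame name and offset below are hypothetical placeholders and must be adapted to the
# actual scene. Kept as a comment so that importing this module has no side effects.
#
#   from omni.isaac.orbit.sensors.frame_transformer import (
#       FrameTransformer, FrameTransformerCfg, OffsetCfg
#   )
#
#   frame_tf_cfg = FrameTransformerCfg(
#       prim_path="/World/envs/env_.*/Robot/base",          # source frame (hypothetical path)
#       target_frames=[
#           FrameTransformerCfg.FrameCfg(
#               name="end_effector",
#               prim_path="/World/envs/env_.*/Robot/hand",  # target body (hypothetical path)
#               offset=OffsetCfg(pos=(0.0, 0.0, 0.1)),      # tool-tip offset from the body frame
#           ),
#       ],
#   )
#   frame_tf = FrameTransformer(frame_tf_cfg)
#
#   # Once the simulation has been stepped, the end-effector pose expressed in the
#   # base frame is available as:
#   #   frame_tf.data.target_pos_source    # shape (num_envs, 1, 3)
#   #   frame_tf.data.target_quat_source   # shape (num_envs, 1, 4)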
18,936
Python
50.181081
118
0.647972
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .frame_transformer import FrameTransformer @configclass class OffsetCfg: """The offset pose of one frame relative to another frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" @configclass class FrameTransformerCfg(SensorBaseCfg): """Configuration for the frame transformer sensor.""" @configclass class FrameCfg: """Information specific to a coordinate frame.""" prim_path: str = MISSING """The prim path corresponding to the parent rigid body. This prim should be part of the same articulation as :attr:`FrameTransformerCfg.prim_path`. """ name: str | None = None """User-defined name for the new coordinate frame. Defaults to None. If None, then the name is extracted from the leaf of the prim path. """ offset: OffsetCfg = OffsetCfg() """The pose offset from the parent prim frame.""" class_type: type = FrameTransformer prim_path: str = MISSING """The prim path of the body to transform from (source frame).""" source_frame_offset: OffsetCfg = OffsetCfg() """The pose offset from the source prim frame.""" target_frames: list[FrameCfg] = MISSING """A list of the target frames. This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped, we can use a single FrameTransformer to track each foot's position and orientation in the body frame using four frame offsets. """ visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
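# Example (illustrative sketch): a single frame transformer tracking all four feet of a
# quadruped in the base frame, as mentioned in the :attr:`target_frames` docstring above.
# The prim paths and foot body names are hypothetical placeholders.
#
#   feet_tf_cfg = FrameTransformerCfg(
#       prim_path="/World/envs/env_.*/Robot/base",
#       target_frames=[
#           FrameTransformerCfg.FrameCfg(prim_path=f"/World/envs/env_.*/Robot/{foot}")
#           for foot in ("LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT")
#       ],
#   )
#
# When ``name`` is not given, each frame defaults to the leaf of its prim path
# (e.g. "LF_FOOT"), and the resulting data tensors contain one entry per foot.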
2,405
Python
32.887323
109
0.689813
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for frame transformer sensor.""" from .frame_transformer import FrameTransformer from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData
342
Python
30.181815
65
0.80117
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch import warnings from dataclasses import dataclass @dataclass class FrameTransformerData: """Data container for the frame transformer sensor.""" target_frame_names: list[str] = None """Target frame names (this denotes the order in which that frame data is ordered). The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. This usually follows the order in which the frames are defined in the config. However, in the case of regex matching, the order may be different. """ target_pos_source: torch.Tensor = None """Position of the target frame(s) relative to source frame. Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ target_quat_source: torch.Tensor = None """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ target_pos_w: torch.Tensor = None """Position of the target frame(s) after offset (in world frame). Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ target_quat_w: torch.Tensor = None """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ source_pos_w: torch.Tensor = None """Position of the source frame after offset (in world frame). Shape is (N, 3), where N is the number of environments. """ source_quat_w: torch.Tensor = None """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). Shape is (N, 4), where N is the number of environments. """ @property def target_rot_source(self) -> torch.Tensor: """Alias for :attr:`target_quat_source`. .. deprecated:: v0.2.1 Use :attr:`target_quat_source` instead. Will be removed in v0.3.0. """ warnings.warn("'target_rot_source' is deprecated, use 'target_quat_source' instead.", DeprecationWarning) return self.target_quat_source @property def target_rot_w(self) -> torch.Tensor: """Alias for :attr:`target_quat_w`. .. deprecated:: v0.2.1 Use :attr:`target_quat_w` instead. Will be removed in v0.3.0. """ warnings.warn("'target_rot_w' is deprecated, use 'target_quat_w' instead.", DeprecationWarning) return self.target_quat_w @property def source_rot_w(self) -> torch.Tensor: """Alias for :attr:`source_quat_w`. .. deprecated:: v0.2.1 Use :attr:`source_quat_w` instead. Will be removed in v0.3.0. """ warnings.warn("'source_rot_w' is deprecated, use 'source_quat_w' instead.", DeprecationWarning) return self.source_quat_w
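# Example (illustrative sketch): looking up a single frame's transform by name. The frame
# name "end_effector" is a hypothetical user-defined name and ``sensor`` is assumed to be
# an initialized frame transformer.
#
#   data: FrameTransformerData = sensor.data
#   idx = data.target_frame_names.index("end_effector")
#   ee_pos_b = data.target_pos_source[:, idx]     # shape (num_envs, 3)
#   ee_quat_b = data.target_quat_source[:, idx]   # shape (num_envs, 4)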
3,123
Python
33.711111
113
0.654819
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math import numpy as np import re import torch from collections.abc import Sequence from tensordict import TensorDict from typing import TYPE_CHECKING, Any, Literal import omni.kit.commands import omni.usd from omni.isaac.core.prims import XFormPrimView from omni.syntheticdata.scripts.SyntheticData import SyntheticData from pxr import UsdGeom import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.utils import to_camel_case from omni.isaac.orbit.utils.array import convert_to_torch from omni.isaac.orbit.utils.math import quat_from_matrix from ..sensor_base import SensorBase from .camera_data import CameraData from .utils import convert_orientation_convention, create_rotation_matrix_from_view if TYPE_CHECKING: from .camera_cfg import CameraCfg class Camera(SensorBase): r"""The camera sensor for acquiring visual data. This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data. It ensures that the camera follows the ROS convention for the coordinate system. Summarizing from the `replicator extension`_, the following sensor types are supported: - ``"rgb"``: A rendered color image. - ``"distance_to_camera"``: An image containing the distance to camera optical center. - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - ``"normals"``: An image containing the local surface normal vectors at each pixel. - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - ``"semantic_segmentation"``: The semantic segmentation data. - ``"instance_segmentation_fast"``: The instance segmentation data. - ``"instance_id_segmentation_fast"``: The instance id segmentation data. .. note:: Currently the following sensor types are not supported in a "view" format: - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). - ``"bounding_box_3d"``: The 3D view space bounding box data. - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output .. _USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html """ cfg: CameraCfg """The configuration parameters.""" UNSUPPORTED_TYPES: set[str] = { "instance_id_segmentation", "instance_segmentation", "bounding_box_2d_tight", "bounding_box_2d_loose", "bounding_box_3d", "bounding_box_2d_tight_fast", "bounding_box_2d_loose_fast", "bounding_box_3d_fast", } """The set of sensor types that are not supported by the camera class.""" def __init__(self, cfg: CameraCfg): """Initializes the camera sensor. Args: cfg: The configuration parameters. Raises: RuntimeError: If no camera prim is found at the given path. ValueError: If the provided data types are not supported by the camera. 
""" # check if sensor path is valid # note: currently we do not handle environment indices if there is a regex pattern in the leaf # For example, if the prim path is "/World/Sensor_[1,2]". sensor_path = cfg.prim_path.split("/")[-1] sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None if sensor_path_is_regex: raise RuntimeError( f"Invalid prim path for the camera sensor: {self.cfg.prim_path}." "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." ) # perform check on supported data types self._check_supported_data_types(cfg) # initialize base class super().__init__(cfg) # spawn the asset if self.cfg.spawn is not None: # compute the rotation offset rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32).unsqueeze(0) rot_offset = convert_orientation_convention(rot, origin=self.cfg.offset.convention, target="opengl") rot_offset = rot_offset.squeeze(0).numpy() # spawn the asset self.cfg.spawn.func( self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset ) # check that spawn was successful matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) if len(matching_prims) == 0: raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") # UsdGeom Camera prim for the sensor self._sensor_prims: list[UsdGeom.Camera] = list() # Create empty variables for storing output data self._data = CameraData() def __del__(self): """Unsubscribes from callbacks and detach from the replicator registry.""" # unsubscribe callbacks super().__del__() # delete from replicator registry for _, annotators in self._rep_registry.items(): for annotator, render_product_path in zip(annotators, self._render_product_paths): annotator.detach([render_product_path]) annotator = None def __str__(self) -> str: """Returns: A string containing information about the instance.""" # message for class return ( f"Camera @ '{self.cfg.prim_path}': \n" f"\tdata types : {self.data.output.sorted_keys} \n" f"\tsemantic filter : {self.cfg.semantic_filter}\n" f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" f"\tupdate period (s): {self.cfg.update_period}\n" f"\tshape : {self.image_shape}\n" f"\tnumber of sensors : {self._view.count}" ) """ Properties """ @property def num_instances(self) -> int: return self._view.count @property def data(self) -> CameraData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def frame(self) -> torch.tensor: """Frame number when the measurement took place.""" return self._frame @property def render_product_paths(self) -> list[str]: """The path of the render products for the cameras. This can be used via replicator interfaces to attach to writes or external annotator registry. """ return self._render_product_paths @property def image_shape(self) -> tuple[int, int]: """A tuple containing (height, width) of the camera sensor.""" return (self.cfg.height, self.cfg.width) """ Configuration """ def set_intrinsic_matrices( self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None ): """Set parameters of the USD camera from its intrinsic matrix. The intrinsic matrix and focal length are used to set the following parameters to the USD camera: - ``focal_length``: The focal length of the camera. 
- ``horizontal_aperture``: The horizontal aperture of the camera. - ``vertical_aperture``: The vertical aperture of the camera. - ``horizontal_aperture_offset``: The horizontal offset of the camera. - ``vertical_aperture_offset``: The vertical offset of the camera. .. warning:: Due to limitations of Omniverse camera, we need to assume that the camera is a spherical lens, i.e. has square pixels, and the optical center is centered at the camera eye. If this assumption is not true in the input intrinsic matrix, then the camera will not set up correctly. Args: matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). focal_length: Focal length to use when computing aperture values. Defaults to 1.0. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. """ # resolve env_ids if env_ids is None: env_ids = self._ALL_INDICES # iterate over env_ids for i, matrix in zip(env_ids, matrices): # convert to numpy for sanity intrinsic_matrix = np.asarray(matrix, dtype=float) # extract parameters from matrix f_x = intrinsic_matrix[0, 0] c_x = intrinsic_matrix[0, 2] f_y = intrinsic_matrix[1, 1] c_y = intrinsic_matrix[1, 2] # get viewport parameters height, width = self.image_shape height, width = float(height), float(width) # resolve parameters for usd camera params = { "focal_length": focal_length, "horizontal_aperture": width * focal_length / f_x, "vertical_aperture": height * focal_length / f_y, "horizontal_aperture_offset": (c_x - width / 2) / f_x, "vertical_aperture_offset": (c_y - height / 2) / f_y, } # change data for corresponding camera index sensor_prim = self._sensor_prims[i] # set parameters for camera for param_name, param_value in params.items(): # convert to camel case (CC) param_name = to_camel_case(param_name, to="CC") # get attribute from the class param_attr = getattr(sensor_prim, f"Get{param_name}Attr") # set value # note: We have to do it this way because the camera might be on a different # layer (default cameras are on session layer), and this is the simplest # way to set the property on the right layer. omni.usd.set_prop_val(param_attr(), param_value) """ Operations - Set pose. """ def set_world_poses( self, positions: torch.Tensor | None = None, orientations: torch.Tensor | None = None, env_ids: Sequence[int] | None = None, convention: Literal["opengl", "ros", "world"] = "ros", ): r"""Set the pose of the camera w.r.t. the world frame using specified convention. Since different fields use different conventions for camera orientations, the method allows users to set the camera poses in the specified convention. Possible conventions are: - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details on the conventions. Args: positions: The cartesian coordinates (in meters). Shape is (N, 3). Defaults to None, in which case the camera position in not changed. orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). Defaults to None, in which case the camera orientation in not changed. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. convention: The convention in which the poses are fed. Defaults to "ros". Raises: RuntimeError: If the camera prim is not set. 
Need to call :meth:`initialize` method first. """ # resolve env_ids if env_ids is None: env_ids = self._ALL_INDICES # convert to backend tensor if positions is not None: if isinstance(positions, np.ndarray): positions = torch.from_numpy(positions).to(device=self._device) elif not isinstance(positions, torch.Tensor): positions = torch.tensor(positions, device=self._device) # convert rotation matrix from input convention to OpenGL if orientations is not None: if isinstance(orientations, np.ndarray): orientations = torch.from_numpy(orientations).to(device=self._device) elif not isinstance(orientations, torch.Tensor): orientations = torch.tensor(orientations, device=self._device) orientations = convert_orientation_convention(orientations, origin=convention, target="opengl") # set the pose self._view.set_world_poses(positions, orientations, env_ids) def set_world_poses_from_view( self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None ): """Set the poses of the camera from the eye position and look-at target position. Args: eyes: The positions of the camera's eye. Shape is (N, 3). targets: The target locations to look at. Shape is (N, 3). env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. Raises: RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. NotImplementedError: If the stage up-axis is not "Y" or "Z". """ # resolve env_ids if env_ids is None: env_ids = self._ALL_INDICES # set camera poses using the view orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device)) self._view.set_world_poses(eyes, orientations, env_ids) """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timestamps super().reset(env_ids) # resolve None # note: cannot do smart indexing here since we do a for loop over data. if env_ids is None: env_ids = self._ALL_INDICES # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. self._update_poses(env_ids) self._update_intrinsic_matrices(env_ids) # Reset the frame count self._frame[env_ids] = 0 """ Implementation. """ def _initialize_impl(self): """Initializes the sensor handles and internal buffers. This function creates handles and registers the provided data types with the replicator registry to be able to access the data from the sensor. It also initializes the internal buffers to store the data. Raises: RuntimeError: If the number of camera prims in the view does not match the number of environments. """ import omni.replicator.core as rep # Initialize parent class super()._initialize_impl() # Create a view for the sensor self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False) self._view.initialize() # Check that sizes are correct if self._view.count != self._num_envs: raise RuntimeError( f"Number of camera prims in the view ({self._view.count}) does not match" f" the number of environments ({self._num_envs})." 
) # Create all env_ids buffer self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) # Attach the sensor data types to render node self._render_product_paths: list[str] = list() self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types} # Obtain current stage stage = omni.usd.get_context().get_stage() # Convert all encapsulated prims to Camera for cam_prim_path in self._view.prim_paths: # Get camera prim cam_prim = stage.GetPrimAtPath(cam_prim_path) # Check if prim is a camera if not cam_prim.IsA(UsdGeom.Camera): raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") # Add to list sensor_prim = UsdGeom.Camera(cam_prim) self._sensor_prims.append(sensor_prim) # Get render product # From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height)) if not isinstance(render_prod_path, str): render_prod_path = render_prod_path.path self._render_product_paths.append(render_prod_path) # Check if semantic types or semantic filter predicate is provided if isinstance(self.cfg.semantic_filter, list): semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*" elif isinstance(self.cfg.semantic_filter, str): semantic_filter_predicate = self.cfg.semantic_filter else: raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.") # set the semantic filter predicate # copied from rep.scripts.writes_default.basic_writer.py SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate) # Iterate over each data type and create annotator # TODO: This will move out of the loop once Replicator supports multiple render products within a single # annotator, i.e.: rep_annotator.attach(self._render_product_paths) for name in self.cfg.data_types: # note: we are verbose here to make it easier to understand the code. # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. # if colorize is false, the data is returned as a uint32 image with ids as values. if name == "semantic_segmentation": init_params = {"colorize": self.cfg.colorize_semantic_segmentation} elif name == "instance_segmentation_fast": init_params = {"colorize": self.cfg.colorize_instance_segmentation} elif name == "instance_id_segmentation_fast": init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} else: init_params = None # Resolve device name if "cuda" in self._device: device_name = self._device.split(":")[0] else: device_name = "cpu" # create annotator node rep_annotator = rep.AnnotatorRegistry.get_annotator(name, init_params, device=device_name) rep_annotator.attach(render_prod_path) # add to registry self._rep_registry[name].append(rep_annotator) # Create internal buffers self._create_buffers() def _update_buffers_impl(self, env_ids: Sequence[int]): # Increment frame count self._frame[env_ids] += 1 # -- intrinsic matrix self._update_intrinsic_matrices(env_ids) # -- pose self._update_poses(env_ids) # -- read the data from annotator registry # check if buffer is called for the first time. 
If so then, allocate the memory if len(self._data.output.sorted_keys) == 0: # this is the first time buffer is called # it allocates memory for all the sensors self._create_annotator_data() else: # iterate over all the data types for name, annotators in self._rep_registry.items(): # iterate over all the annotators for index in env_ids: # get the output output = annotators[index].get_data() # process the output data, info = self._process_annotator_output(name, output) # add data to output self._data.output[name][index] = data # add info to output self._data.info[index][name] = info """ Private Helpers """ def _check_supported_data_types(self, cfg: CameraCfg): """Checks if the data types are supported by the ray-caster camera.""" # check if there is any intersection in unsupported types # reason: these use np structured data types which we can't yet convert to torch tensor common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES if common_elements: # provide alternative fast counterparts fast_common_elements = [] for item in common_elements: if "instance_segmentation" in item or "instance_id_segmentation" in item: fast_common_elements.append(item + "_fast") # raise error raise ValueError( f"Camera class does not support the following sensor types: {common_elements}." "\n\tThis is because these sensor types output numpy structured data types which" "can't be converted to torch tensors easily." "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." f"\n\t\tFast counterparts: {fast_common_elements}" ) def _create_buffers(self): """Create buffers for storing data.""" # create the data object # -- pose of the cameras self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) # -- intrinsic matrix self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) self._data.image_shape = self.image_shape # -- output data # lazy allocation of data dictionary # since the size of the output data is not known in advance, we leave it as None # the memory will be allocated when the buffer() function is called for the first time. self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device) self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)] def _update_intrinsic_matrices(self, env_ids: Sequence[int]): """Compute camera's matrix of intrinsic parameters. Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. Note: The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. The coordinates of points on the image plane are in the homogeneous representation. 
""" # iterate over all cameras for i in env_ids: # Get corresponding sensor prim sensor_prim = self._sensor_prims[i] # get camera parameters focal_length = sensor_prim.GetFocalLengthAttr().Get() horiz_aperture = sensor_prim.GetHorizontalApertureAttr().Get() # get viewport parameters height, width = self.image_shape # calculate the field of view fov = 2 * math.atan(horiz_aperture / (2 * focal_length)) # calculate the focal length in pixels focal_px = width * 0.5 / math.tan(fov / 2) # create intrinsic matrix for depth linear self._data.intrinsic_matrices[i, 0, 0] = focal_px self._data.intrinsic_matrices[i, 0, 2] = width * 0.5 self._data.intrinsic_matrices[i, 1, 1] = focal_px self._data.intrinsic_matrices[i, 1, 2] = height * 0.5 self._data.intrinsic_matrices[i, 2, 2] = 1 def _update_poses(self, env_ids: Sequence[int]): """Computes the pose of the camera in the world frame with ROS convention. This methods uses the ROS convention to resolve the input pose. In this convention, we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z). """ # check camera prim exists if len(self._sensor_prims) == 0: raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.") # get the poses from the view poses, quat = self._view.get_world_poses(env_ids) self._data.pos_w[env_ids] = poses self._data.quat_w_world[env_ids] = convert_orientation_convention(quat, origin="opengl", target="world") def _create_annotator_data(self): """Create the buffers to store the annotator data. We create a buffer for each annotator and store the data in a dictionary. Since the data shape is not known beforehand, we create a list of buffers and concatenate them later. This is an expensive operation and should be called only once. """ # add data from the annotators for name, annotators in self._rep_registry.items(): # create a list to store the data for each annotator data_all_cameras = list() # iterate over all the annotators for index in self._ALL_INDICES: # get the output output = annotators[index].get_data() # process the output data, info = self._process_annotator_output(name, output) # append the data data_all_cameras.append(data) # store the info self._data.info[index][name] = info # concatenate the data along the batch dimension self._data.output[name] = torch.stack(data_all_cameras, dim=0) def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: """Process the annotator output. This function is called after the data has been collected from all the cameras. 
""" # extract info and data from the output if isinstance(output, dict): data = output["data"] info = output["info"] else: data = output info = None # convert data into torch tensor data = convert_to_torch(data, device=self.device) # process data for different segmentation types # Note: Replicator returns raw buffers of dtype int32 for segmentation types # so we need to convert them to uint8 4 channel images for colorized types height, width = self.image_shape if name == "semantic_segmentation": if self.cfg.colorize_semantic_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) elif name == "instance_segmentation_fast": if self.cfg.colorize_instance_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) elif name == "instance_id_segmentation_fast": if self.cfg.colorize_instance_id_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) # return the data and info return data, info """ Internal simulation callbacks. """ def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._view = None
28,980
Python
45.074722
137
0.616874
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from dataclasses import dataclass from tensordict import TensorDict from typing import Any from .utils import convert_orientation_convention @dataclass class CameraData: """Data container for the camera sensor.""" ## # Frame state. ## pos_w: torch.Tensor = None """Position of the sensor origin in world frame, following ROS convention. Shape is (N, 3) where N is the number of sensors. """ quat_w_world: torch.Tensor = None """Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame .. note:: World frame convention follows the camera aligned with forward axis +X and up axis +Z. Shape is (N, 4) where N is the number of sensors. """ ## # Camera data ## image_shape: tuple[int, int] = None """A tuple containing (height, width) of the camera sensor.""" intrinsic_matrices: torch.Tensor = None """The intrinsic matrices for the camera. Shape is (N, 3, 3) where N is the number of sensors. """ output: TensorDict = None """The retrieved sensor data with sensor types as key. The format of the data is available in the `Replicator Documentation`_. For semantic-based data, this corresponds to the ``"data"`` key in the output of the sensor. .. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output """ info: list[dict[str, Any]] = None """The retrieved sensor info with sensor types as key. This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths. For semantic-based data, this corresponds to the ``"info"`` key in the output of the sensor. For other sensor types, the info is empty. """ ## # Additional Frame orientation conventions ## @property def quat_w_ros(self) -> torch.Tensor: """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention. .. note:: ROS convention follows the camera aligned with forward axis +Z and up axis -Y. Shape is (N, 4) where N is the number of sensors. """ return convert_orientation_convention(self.quat_w_world, origin="world", target="ros") @property def quat_w_opengl(self) -> torch.Tensor: """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following Opengl / USD Camera convention. .. note:: OpenGL convention follows the camera aligned with forward axis -Z and up axis +Y. Shape is (N, 4) where N is the number of sensors. """ return convert_orientation_convention(self.quat_w_world, origin="world", target="opengl")
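# Example (illustrative sketch): the same world-frame orientation expressed in the three
# supported conventions. ``data`` is assumed to be a populated ``CameraData`` instance.
#
#   q_world = data.quat_w_world    # +X forward, +Z up
#   q_ros = data.quat_w_ros        # +Z forward, -Y up
#   q_gl = data.quat_w_opengl      # -Z forward, +Y up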
3,019
Python
30.789473
155
0.670421
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for camera wrapper around USD camera prim.""" from .camera import Camera from .camera_cfg import CameraCfg from .camera_data import CameraData from .utils import * # noqa: F401, F403
322
Python
25.916665
59
0.751553
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Helper functions to project between pointcloud and depth images.""" from __future__ import annotations import math import numpy as np import torch import torch.nn.functional as F from collections.abc import Sequence from typing import Literal import omni.isaac.core.utils.stage as stage_utils import warp as wp from pxr import UsdGeom import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.utils.array import TensorData, convert_to_torch """ Depth <-> Pointcloud conversions. """ def transform_points( points: TensorData, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, ) -> np.ndarray | torch.Tensor: r"""Transform input points in a given frame to a target frame. This function transform points from a source frame to a target frame. The transformation is defined by the position ``t`` and orientation ``R`` of the target frame in the source frame. .. math:: p_{target} = R_{target} \times p_{source} + t_{target} If either the inputs `position` and `orientation` are None, the corresponding transformation is not applied. Args: points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in source frame. position: The position of source frame in target frame. Defaults to None. orientation: The orientation (w, x, y, z) of source frame in target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. Returns: A tensor of shape (N, 3) comprising of 3D points in target frame. If the input is a numpy array, the output is a numpy array. Otherwise, it is a torch tensor. """ # check if numpy is_numpy = isinstance(points, np.ndarray) # decide device if device is None and is_numpy: device = torch.device("cpu") # convert to torch points = convert_to_torch(points, dtype=torch.float32, device=device) # update the device with the device of the depth image # note: this is needed since warp does not provide the device directly device = points.device # apply rotation if orientation is not None: orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) # apply translation if position is not None: position = convert_to_torch(position, dtype=torch.float32, device=device) # apply transformation points = math_utils.transform_points(points, position, orientation) # return everything according to input type if is_numpy: return points.detach().cpu().numpy() else: return points def create_pointcloud_from_depth( intrinsic_matrix: np.ndarray | torch.Tensor | wp.array, depth: np.ndarray | torch.Tensor | wp.array, keep_invalid: bool = False, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, ) -> np.ndarray | torch.Tensor: r"""Creates pointcloud from input depth image and camera intrinsic matrix. This function creates a pointcloud from a depth image and camera intrinsic matrix. The pointcloud is computed using the following equation: .. math:: p_{camera} = K^{-1} \times [u, v, 1]^T \times d where :math:`K` is the camera intrinsic matrix, :math:`u` and :math:`v` are the pixel coordinates and :math:`d` is the depth value at the pixel. 
Additionally, the pointcloud can be transformed from the camera frame to a target frame by providing the position ``t`` and orientation ``R`` of the camera in the target frame: .. math:: p_{target} = R_{target} \times p_{camera} + t_{target} Args: intrinsic_matrix: A (3, 3) array providing camera's calibration matrix. depth: An array of shape (H, W) with values encoding the depth measurement. keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points correspond to pixels with depth values 0.0 or NaN. Defaults to False. position: The position of the camera in a target frame. Defaults to None. orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. Returns: An array/tensor of shape (N, 3) comprising of 3D coordinates of points. The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray is returned. """ # We use PyTorch here for matrix multiplication since it is compiled with Intel MKL while numpy # by default uses OpenBLAS. With PyTorch (CPU), we could process a depth image of size (480, 640) # in 0.0051 secs, while with numpy it took 0.0292 secs. # convert to numpy matrix is_numpy = isinstance(depth, np.ndarray) # decide device if device is None and is_numpy: device = torch.device("cpu") # convert depth to torch tensor depth = convert_to_torch(depth, dtype=torch.float32, device=device) # update the device with the device of the depth image # note: this is needed since warp does not provide the device directly device = depth.device # convert inputs to torch tensors intrinsic_matrix = convert_to_torch(intrinsic_matrix, dtype=torch.float32, device=device) if position is not None: position = convert_to_torch(position, dtype=torch.float32, device=device) if orientation is not None: orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) # compute pointcloud depth_cloud = math_utils.unproject_depth(depth, intrinsic_matrix) # convert 3D points to world frame depth_cloud = math_utils.transform_points(depth_cloud, position, orientation) # keep only valid entries if flag is set if not keep_invalid: pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(depth_cloud), ~torch.isinf(depth_cloud)), dim=1) depth_cloud = depth_cloud[pts_idx_to_keep, ...] # return everything according to input type if is_numpy: return depth_cloud.detach().cpu().numpy() else: return depth_cloud def create_pointcloud_from_rgbd( intrinsic_matrix: torch.Tensor | np.ndarray | wp.array, depth: torch.Tensor | np.ndarray | wp.array, rgb: torch.Tensor | wp.array | np.ndarray | tuple[float, float, float] = None, normalize_rgb: bool = False, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, num_channels: int = 3, ) -> tuple[torch.Tensor, torch.Tensor] | tuple[np.ndarray, np.ndarray]: """Creates pointcloud from input depth image and camera transformation matrix. This function provides the same functionality as :meth:`create_pointcloud_from_depth` but also allows to provide the RGB values for each point. The ``rgb`` attribute is used to resolve the corresponding point's color: - If a ``np.array``/``wp.array``/``torch.tensor`` of shape (H, W, 3), then the corresponding channels encode RGB values. - If a tuple, then the point cloud has a single color specified by the values (r, g, b). 
- If None, then default color is white, i.e. (0, 0, 0). If the input ``normalize_rgb`` is set to :obj:`True`, then the RGB values are normalized to be in the range [0, 1]. Args: intrinsic_matrix: A (3, 3) array/tensor providing camera's calibration matrix. depth: An array/tensor of shape (H, W) with values encoding the depth measurement. rgb: Color for generated point cloud. Defaults to None. normalize_rgb: Whether to normalize input rgb. Defaults to False. position: The position of the camera in a target frame. Defaults to None. orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, in which case it takes the device that matches the depth image. num_channels: Number of channels in RGB pointcloud. Defaults to 3. Returns: A tuple of (N, 3) arrays or tensors containing the 3D coordinates of points and their RGB color respectively. The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray is returned. Raises: ValueError: When rgb image is a numpy array but not of shape (H, W, 3) or (H, W, 4). """ # check valid inputs if rgb is not None and not isinstance(rgb, tuple): if len(rgb.shape) == 3: if rgb.shape[2] not in [3, 4]: raise ValueError(f"Input rgb image of invalid shape: {rgb.shape} != (H, W, 3) or (H, W, 4).") else: raise ValueError(f"Input rgb image not three-dimensional. Received shape: {rgb.shape}.") if num_channels not in [3, 4]: raise ValueError(f"Invalid number of channels: {num_channels} != 3 or 4.") # check if input depth is numpy array is_numpy = isinstance(depth, np.ndarray) # decide device if device is None and is_numpy: device = torch.device("cpu") # convert depth to torch tensor if is_numpy: depth = torch.from_numpy(depth).to(device=device) # retrieve XYZ pointcloud points_xyz = create_pointcloud_from_depth(intrinsic_matrix, depth, True, position, orientation, device=device) # get image height and width im_height, im_width = depth.shape[:2] # total number of points num_points = im_height * im_width # extract color value if rgb is not None: if isinstance(rgb, (np.ndarray, torch.Tensor, wp.array)): # copy numpy array to preserve rgb = convert_to_torch(rgb, device=device, dtype=torch.float32) rgb = rgb[:, :, :3] # convert the matrix to (W, H, 3) from (H, W, 3) since depth processing # is done in the order (u, v) where u: (0, W-1) and v: (0 - H-1) points_rgb = rgb.permute(1, 0, 2).reshape(-1, 3) elif isinstance(rgb, (tuple, list)): # same color for all points points_rgb = torch.Tensor((rgb,) * num_points, device=device, dtype=torch.uint8) else: # default color is white points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) else: points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) # normalize color values if normalize_rgb: points_rgb = points_rgb.float() / 255 # remove invalid points pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(points_xyz), ~torch.isinf(points_xyz)), dim=1) points_rgb = points_rgb[pts_idx_to_keep, ...] points_xyz = points_xyz[pts_idx_to_keep, ...] 
# add additional channels if required if num_channels == 4: points_rgb = torch.nn.functional.pad(points_rgb, (0, 1), mode="constant", value=1.0) # return everything according to input type if is_numpy: return points_xyz.cpu().numpy(), points_rgb.cpu().numpy() else: return points_xyz, points_rgb def convert_orientation_convention( orientation: torch.Tensor, origin: Literal["opengl", "ros", "world"] = "opengl", target: Literal["opengl", "ros", "world"] = "ros", ) -> torch.Tensor: r"""Converts a quaternion representing a rotation from one convention to another. In USD, the camera follows the ``"opengl"`` convention. Thus, it is always in **Y up** convention. This means that the camera is looking down the -Z axis with the +Y axis pointing up, and +X axis pointing right. However, in ROS, the camera is looking down the +Z axis with the +Y axis pointing down, and +X axis pointing right. Thus, the camera needs to be rotated by :math:`180^{\circ}` around the X axis to follow the ROS convention. .. math:: T_{ROS} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD} On the other hand, the typical world coordinate system is with +X pointing forward, +Y pointing left, and +Z pointing up. The camera can also be set in this convention by rotating the camera by :math:`90^{\circ}` around the X axis and :math:`-90^{\circ}` around the Y axis. .. math:: T_{WORLD} = \begin{bmatrix} 0 & 0 & -1 & 0 \\ -1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD} Thus, based on their application, cameras follow different conventions for their orientation. This function converts a quaternion from one convention to another. Possible conventions are: - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention Args: orientation: Quaternion of form `(w, x, y, z)` with shape (..., 4) in the source convention. origin: Convention to convert from. Defaults to "opengl". target: Convention to convert to. Defaults to "ros".
Returns: Quaternion of form `(w, x, y, z)` with shape (..., 4) in target convention """ if target == origin: return orientation.clone() # -- unify input type if origin == "ros": # convert from ros to opengl convention rotm = math_utils.matrix_from_quat(orientation) rotm[:, :, 2] = -rotm[:, :, 2] rotm[:, :, 1] = -rotm[:, :, 1] # convert to opengl convention quat_gl = math_utils.quat_from_matrix(rotm) elif origin == "world": # convert from world (x forward and z up) to opengl convention rotm = math_utils.matrix_from_quat(orientation) rotm = torch.matmul( rotm, math_utils.matrix_from_euler( torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ" ), ) # convert to isaac-sim convention quat_gl = math_utils.quat_from_matrix(rotm) else: quat_gl = orientation # -- convert to target convention if target == "ros": # convert from opengl to ros convention rotm = math_utils.matrix_from_quat(quat_gl) rotm[:, :, 2] = -rotm[:, :, 2] rotm[:, :, 1] = -rotm[:, :, 1] return math_utils.quat_from_matrix(rotm) elif target == "world": # convert from opengl to world (x forward and z up) convention rotm = math_utils.matrix_from_quat(quat_gl) rotm = torch.matmul( rotm, math_utils.matrix_from_euler( torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ" ).T, ) return math_utils.quat_from_matrix(rotm) else: return quat_gl.clone() # @torch.jit.script def create_rotation_matrix_from_view( eyes: torch.Tensor, targets: torch.Tensor, device: str = "cpu", ) -> torch.Tensor: """ This function takes a vector ''eyes'' which specifies the location of the camera in world coordinates and the vector ''targets'' which indicate the position of the object. The output is a rotation matrix representing the transformation from world coordinates -> view coordinates. The inputs camera_position and targets can each be a - 3 element tuple/list - torch tensor of shape (1, 3) - torch tensor of shape (N, 3) Args: eyes: position of the camera in world coordinates targets: position of the object in world coordinates The vectors are broadcast against each other so they all have shape (N, 3). Returns: R: (N, 3, 3) batched rotation matrices Reference: Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/eaf0709d6af0025fe94d1ee7cec454bc3054826a/pytorch3d/renderer/cameras.py#L1635-L1685) """ up_axis_token = stage_utils.get_stage_up_axis() if up_axis_token == UsdGeom.Tokens.y: up_axis = torch.tensor((0, 1, 0), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1) elif up_axis_token == UsdGeom.Tokens.z: up_axis = torch.tensor((0, 0, 1), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1) else: raise ValueError(f"Invalid up axis: {up_axis_token}") # get rotation matrix in opengl format (-Z forward, +Y up) z_axis = -F.normalize(targets - eyes, eps=1e-5) x_axis = F.normalize(torch.cross(up_axis, z_axis, dim=1), eps=1e-5) y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5) is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(dim=1, keepdim=True) if is_close.any(): replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5) x_axis = torch.where(is_close, replacement, x_axis) R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1) return R.transpose(1, 2)
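# Example (illustrative sketch): converting a depth image from a camera sensor into a
# world-frame point cloud. ``camera`` is assumed to be an initialized ``Camera`` instance
# with "distance_to_image_plane" among its data types; environment index 0 is arbitrary.
#
#   points_w = create_pointcloud_from_depth(
#       intrinsic_matrix=camera.data.intrinsic_matrices[0],
#       depth=camera.data.output["distance_to_image_plane"][0],
#       position=camera.data.pos_w[0],
#       orientation=camera.data.quat_w_ros[0],
#       device=camera.device,
#   )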
17,516
Python
42.902256
158
0.653574
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from typing import Literal from omni.isaac.orbit.sim import FisheyeCameraCfg, PinholeCameraCfg from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .camera import Camera @configclass class CameraCfg(SensorBaseCfg): """Configuration for a camera sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention. - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. """ class_type: type = Camera offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. Note: The parent frame is the frame the sensor attaches to. For example, the parent frame of a camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. """ spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING """Spawn configuration for the asset. If None, then the prim is not spawned by the asset. Instead, it is assumed that the asset is already present in the scene. """ data_types: list[str] = ["rgb"] """List of sensor names/types to enable for the camera. Defaults to ["rgb"]. Please refer to the :class:`Camera` class for a list of available data types. """ width: int = MISSING """Width of the image in pixels.""" height: int = MISSING """Height of the image in pixels.""" semantic_filter: str | list[str] = "*:*" """A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``. If a string, it should be a disjunctive normal form of (semantic type, labels). For examples: * ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``: All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC". Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE". * ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA" If a list of strings, each string should be a semantic type. The segmentation for prims with semantics of the specified types will be retrieved. For example, if the list is ["class"], only the segmentation for prims with semantics of type "class" will be retrieved. .. seealso:: For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering """ colorize_semantic_segmentation: bool = True """Whether to colorize the semantic segmentation images. Defaults to True. 
If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array. """ colorize_instance_id_segmentation: bool = True """Whether to colorize the instance ID segmentation images. Defaults to True. If True, instance ID segmentation is converted to an image where instance IDs are mapped to colors and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array. """ colorize_instance_segmentation: bool = True """Whether to colorize the instance segmentation images. Defaults to True. If True, instance segmentation is converted to an image where instance IDs are mapped to colors and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array. """
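# Example (illustrative sketch): a minimal camera configuration. The prim path is a
# hypothetical placeholder and the spawn parameter values are only illustrative; see
# ``PinholeCameraCfg`` for the available spawn parameters.
#
#   import omni.isaac.orbit.sim as sim_utils
#
#   camera_cfg = CameraCfg(
#       prim_path="/World/envs/env_.*/Robot/base/front_cam",
#       width=640,
#       height=480,
#       data_types=["rgb", "distance_to_image_plane"],
#       spawn=sim_utils.PinholeCameraCfg(focal_length=24.0, clipping_range=(0.1, 20.0)),
#       offset=CameraCfg.OffsetCfg(pos=(0.5, 0.0, 0.2), convention="ros"),
#   )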
4,659
Python
40.981982
159
0.674394
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # Ignore optional memory usage warning globally # pyright: reportOptionalSubscript=false from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import omni.physics.tensors.impl.api as physx from pxr import PhysxSchema import omni.isaac.orbit.sim as sim_utils import omni.isaac.orbit.utils.string as string_utils from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.utils.math import convert_quat from ..sensor_base import SensorBase from .contact_sensor_data import ContactSensorData if TYPE_CHECKING: from .contact_sensor_cfg import ContactSensorCfg class ContactSensor(SensorBase): """A contact reporting sensor. The contact sensor reports the normal contact forces on a rigid body in the world frame. It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. To enable the contact reporter on a rigid body, please make sure to enable the :attr:`omni.isaac.orbit.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your asset spawner configuration. This will enable the contact reporter on all the rigid bodies in the asset. The sensor can be configured to report the contact forces on a set of bodies with a given filter pattern. Please check the documentation on `RigidContactView`_ for more details. .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html .. _RigidContactView: https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html#omni.isaac.core.prims.RigidContactView """ cfg: ContactSensorCfg """The configuration parameters.""" def __init__(self, cfg: ContactSensorCfg): """Initializes the contact sensor object. Args: cfg: The configuration parameters. """ # initialize base class super().__init__(cfg) # Create empty variables for storing output data self._data: ContactSensorData = ContactSensorData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"Contact sensor @ '{self.cfg.prim_path}': \n" f"\tview type : {self.body_physx_view.__class__}\n" f"\tupdate period (s) : {self.cfg.update_period}\n" f"\tnumber of bodies : {self.num_bodies}\n" f"\tbody names : {self.body_names}\n" ) """ Properties """ @property def num_instances(self) -> int: return self.body_physx_view.count @property def data(self) -> ContactSensorData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def num_bodies(self) -> int: """Number of bodies with contact sensors attached.""" return self._num_bodies @property def body_names(self) -> list[str]: """Ordered names of bodies with contact sensors attached.""" prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] return [path.split("/")[-1] for path in prim_paths] @property def body_physx_view(self) -> physx.RigidBodyView: """View for the rigid bodies captured (PhysX). Note: Use this view with caution. It requires handling of tensors in a specific way. """ return self._body_physx_view @property def contact_physx_view(self) -> physx.RigidContactView: """Contact reporter view for the bodies (PhysX). Note: Use this view with caution. It requires handling of tensors in a specific way. 
""" return self._contact_physx_view """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # reset accumulative data buffers self._data.net_forces_w[env_ids] = 0.0 self._data.net_forces_w_history[env_ids] = 0.0 if self.cfg.history_length > 0: self._data.net_forces_w_history[env_ids] = 0.0 # reset force matrix if len(self.cfg.filter_prim_paths_expr) != 0: self._data.force_matrix_w[env_ids] = 0.0 # reset the current air time if self.cfg.track_air_time: self._data.current_air_time[env_ids] = 0.0 self._data.last_air_time[env_ids] = 0.0 self._data.current_contact_time[env_ids] = 0.0 self._data.last_contact_time[env_ids] = 0.0 def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the articulation based on the name keys. Args: name_keys: A regular expression or a list of regular expressions to match the body names. preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. Returns: A tuple of lists containing the body indices and names. """ return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: """Checks if bodies that have established contact within the last :attr:`dt` seconds. This function checks if the bodies have established contact within the last :attr:`dt` seconds by comparing the current contact time with the given time period. If the contact time is less than the given time period, then the bodies are considered to be in contact. Note: The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if the sensor is updated by the physics or the environment stepping time-step and the sensor is read by the environment stepping time-step. Args: dt: The time period since the contact was established. abs_tol: The absolute tolerance for the comparison. Returns: A boolean tensor indicating the bodies that have established contact within the last :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. Raises: RuntimeError: If the sensor is not configured to track contact time. """ # check if the sensor is configured to track contact time if not self.cfg.track_air_time: raise RuntimeError( "The contact sensor is not configured to track contact time." "Please enable the 'track_air_time' in the sensor configuration." ) # check if the bodies are in contact currently_in_contact = self.data.current_contact_time > 0.0 less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) return currently_in_contact * less_than_dt_in_contact def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: """Checks if bodies that have broken contact within the last :attr:`dt` seconds. This function checks if the bodies have broken contact within the last :attr:`dt` seconds by comparing the current air time with the given time period. If the air time is less than the given time period, then the bodies are considered to not be in contact. Note: It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. 
            This is always true if the sensor is updated by the physics or the environment stepping
            time-step and the sensor is read by the environment stepping time-step.

        Args:
            dt: The time period since the contact was broken.
            abs_tol: The absolute tolerance for the comparison.

        Returns:
            A boolean tensor indicating the bodies that have broken contact within the last
            :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the
            number of bodies in each sensor.

        Raises:
            RuntimeError: If the sensor is not configured to track contact time.
        """
        # check if the sensor is configured to track contact time
        if not self.cfg.track_air_time:
            raise RuntimeError(
                "The contact sensor is not configured to track contact time. "
                "Please enable the 'track_air_time' in the sensor configuration."
            )
        # check if the bodies are currently detached
        currently_detached = self.data.current_air_time > 0.0
        less_than_dt_detached = self.data.current_air_time < (dt + abs_tol)
        return currently_detached * less_than_dt_detached

    """
    Implementation.
    """

    def _initialize_impl(self):
        super()._initialize_impl()
        # create simulation view
        self._physics_sim_view = physx.create_simulation_view(self._backend)
        self._physics_sim_view.set_subspace_roots("/")
        # check that only rigid bodies are selected
        leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1]
        template_prim_path = self._parent_prims[0].GetPath().pathString
        body_names = list()
        for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern):
            # check if prim has contact reporter API
            if prim.HasAPI(PhysxSchema.PhysxContactReportAPI):
                prim_path = prim.GetPath().pathString
                body_names.append(prim_path.rsplit("/", 1)[-1])
        # check that there is at least one body with contact reporter API
        if not body_names:
            raise RuntimeError(
                f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API."
                "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration."
            )
        # construct regex expression for the body names
        body_names_regex = r"(" + "|".join(body_names) + r")"
        # construct a new regex expression scoped to the sensor's parent prim path
        body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}"
        # create a rigid prim view for the sensor
        self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*"))
        self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view(
            body_names_regex.replace(".*", "*"), filter_patterns=self.cfg.filter_prim_paths_expr
        )
        # resolve the true count of bodies
        self._num_bodies = self.body_physx_view.count // self._num_envs
        # check that contact reporter succeeded
        if self._num_bodies != len(body_names):
            raise RuntimeError(
                "Failed to initialize contact reporter for specified bodies."
f"\n\tInput prim path : {self.cfg.prim_path}" f"\n\tResolved prim paths: {body_names_regex}" ) # prepare data buffers self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) # optional buffers # -- history of net forces if self.cfg.history_length > 0: self._data.net_forces_w_history = torch.zeros( self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device ) else: self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) # -- pose of sensor origins if self.cfg.track_pose: self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) # -- air/contact time between contacts if self.cfg.track_air_time: self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) if len(self.cfg.filter_prim_paths_expr) != 0: num_filters = self.contact_physx_view.filter_count self._data.force_matrix_w = torch.zeros( self._num_envs, self._num_bodies, num_filters, 3, device=self._device ) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" # default to all sensors if len(env_ids) == self._num_envs: env_ids = slice(None) # obtain the contact forces # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). # This isn't the most efficient way to do this, but it's the easiest to implement. 
        net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt)
        self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids]
        # update contact force history
        if self.cfg.history_length > 0:
            self._data.net_forces_w_history[env_ids, 1:] = self._data.net_forces_w_history[env_ids, :-1].clone()
            self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids]

        # obtain the contact force matrix
        if len(self.cfg.filter_prim_paths_expr) != 0:
            # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3)
            num_filters = self.contact_physx_view.filter_count
            # acquire and shape the force matrix
            force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt)
            force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3)
            self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids]
        # obtain the pose of the sensor origin
        if self.cfg.track_pose:
            pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids]
            pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz")
            self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1)
        # obtain the air time
        if self.cfg.track_air_time:
            # -- time elapsed since last update
            # since this function is called every frame, we can use the difference to get the elapsed time
            elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids]
            # -- check contact state of bodies
            is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold
            is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact
            is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact
            # -- update the last air time if the body has just come into contact
            self._data.last_air_time[env_ids] = torch.where(
                is_first_contact,
                self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1),
                self._data.last_air_time[env_ids],
            )
            # -- increment time for bodies that are not in contact
            self._data.current_air_time[env_ids] = torch.where(
                ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0
            )
            # -- update the last contact time if the body has just detached
            self._data.last_contact_time[env_ids] = torch.where(
                is_first_detached,
                self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1),
                self._data.last_contact_time[env_ids],
            )
            # -- increment time for bodies that are in contact
            self._data.current_contact_time[env_ids] = torch.where(
                is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0
            )

    def _set_debug_vis_impl(self, debug_vis: bool):
        # set visibility of markers
        # note: parent only deals with callbacks, not their visibility
        if debug_vis:
            # create markers if necessary for the first time
            if not hasattr(self, "contact_visualizer"):
                self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
            # set their visibility to true
            self.contact_visualizer.set_visibility(True)
        else:
            if hasattr(self, "contact_visualizer"):
                self.contact_visualizer.set_visibility(False)

    def _debug_vis_callback(self, event):
        # safely return if view becomes invalid
        # note: this invalidity happens because of isaac sim view callbacks
        if self.body_physx_view is None:
            return
        # marker indices
        # 0: contact, 1: no contact
        net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1)
        marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1)
        # check where to place the markers based on whether the sensor pose is tracked
        if self.cfg.track_pose:
            frame_origins: torch.Tensor = self._data.pos_w
        else:
            pose = self.body_physx_view.get_transforms()
            frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3]
        # visualize
        self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1))

    """
    Internal simulation callbacks.
    """

    def _invalidate_initialize_callback(self, event):
        """Invalidates the scene elements."""
        # call parent
        super()._invalidate_initialize_callback(event)
        # set all existing views to None to invalidate them
        self._physics_sim_view = None
        self._body_physx_view = None
        self._contact_physx_view = None
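

# Example (illustrative sketch, not part of this module's API): how the sensor above is typically
# queried once it has been added to an interactive scene and initialized. The update period, body
# pattern and the fact that ``track_air_time`` is enabled are assumptions for illustration only.


def _example_read_contact_sensor(sensor: ContactSensor, dt: float) -> None:
    """Prints basic contact information from an initialized sensor.

    The sensor is assumed to be created from a ``ContactSensorCfg`` with ``track_air_time=True``,
    which is required by :meth:`ContactSensor.compute_first_contact`.
    """
    # net contact forces in world frame: shape (num_envs, num_bodies, 3)
    net_forces = sensor.data.net_forces_w
    # bodies that established contact within the last `dt` seconds: shape (num_envs, num_bodies)
    first_contact = sensor.compute_first_contact(dt)
    print("Force buffer shape :", tuple(net_forces.shape))
    print("Bodies in first contact:", int(first_contact.sum().item()))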
18,724
Python
46.285353
160
0.628124
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from dataclasses import dataclass


@dataclass
class ContactSensorData:
    """Data container for the contact reporting sensor."""

    pos_w: torch.Tensor | None = None
    """Position of the sensor origin in world frame.

    Shape is (N, 3), where N is the number of sensors.

    Note:
        If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
    """

    quat_w: torch.Tensor | None = None
    """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.

    Shape is (N, 4), where N is the number of sensors.

    Note:
        If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
    """

    net_forces_w: torch.Tensor | None = None
    """The net contact forces in world frame.

    Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor.
    """

    net_forces_w_history: torch.Tensor | None = None
    """The net contact forces in world frame.

    Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length
    and B is the number of bodies in each sensor.

    In the history dimension, the first index is the most recent and the last index is the oldest.
    """

    force_matrix_w: torch.Tensor | None = None
    """The contact forces filtered between the sensor bodies and filtered bodies in world frame.

    Shape is (N, B, M, 3), where N is the number of sensors, B is the number of bodies in each sensor
    and ``M`` is the number of filtered bodies.

    Note:
        If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None.
    """

    last_air_time: torch.Tensor | None = None
    """Time spent (in s) in the air before the last contact.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    current_air_time: torch.Tensor | None = None
    """Time spent (in s) in the air since the last detach.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    last_contact_time: torch.Tensor | None = None
    """Time spent (in s) in contact before the last detach.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    current_contact_time: torch.Tensor | None = None
    """Time spent (in s) in contact since the last contact.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """
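

# Example (illustrative sketch): how the buffers above are typically consumed. The threshold value
# is an assumption; in practice it would come from the sensor configuration
# (``ContactSensorCfg.force_threshold``).


def _example_contact_mask(data: ContactSensorData, force_threshold: float = 1.0) -> torch.Tensor:
    """Returns a boolean mask of shape (N, B) marking bodies whose net contact force exceeds the threshold."""
    return torch.norm(data.net_forces_w, dim=-1) > force_threshold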
3,111
Python
32.106383
102
0.668917
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for rigid contact sensor based on :class:`omni.isaac.core.prims.RigidContactView`.""" from .contact_sensor import ContactSensor from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData
366
Python
32.363633
99
0.789617
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from omni.isaac.orbit.markers import VisualizationMarkersCfg from omni.isaac.orbit.markers.config import CONTACT_SENSOR_MARKER_CFG from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .contact_sensor import ContactSensor @configclass class ContactSensorCfg(SensorBaseCfg): """Configuration for the contact sensor.""" class_type: type = ContactSensor track_pose: bool = False """Whether to track the pose of the sensor's origin. Defaults to False.""" track_air_time: bool = False """Whether to track the air/contact time of the bodies (time between contacts). Defaults to False.""" force_threshold: float = 1.0 """The threshold on the norm of the contact force that determines whether two bodies are in collision or not. This value is only used for tracking the mode duration (the time in contact or in air), if :attr:`track_air_time` is True. """ filter_prim_paths_expr: list[str] = list() """The list of primitive paths to filter contacts with. For example, if you want to filter contacts with the ground plane, you can set this to ``["/World/ground_plane"]``. In this case, the contact sensor will only report contacts with the ground plane while using the :meth:`omni.isaac.core.prims.RigidContactView.get_contact_force_matrix` method. If an empty list is provided, then only net contact forces are reported. """ visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor") """The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
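

# Example (illustrative sketch): a typical instantiation of the configuration above. The prim paths
# are assumptions for illustration; "{ENV_REGEX_NS}" is the per-environment namespace placeholder
# used by interactive scene configurations, and "/World/ground" stands in for a ground-plane prim.


def _example_foot_contact_cfg() -> ContactSensorCfg:
    """Builds a config that tracks air time on the feet and filters contacts against the ground."""
    return ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT",
        track_air_time=True,
        force_threshold=1.0,
        filter_prim_paths_expr=["/World/ground"],
    )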
1,916
Python
35.865384
115
0.733299
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/rl_task_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING

from omni.isaac.orbit.utils import configclass

from .base_env_cfg import BaseEnvCfg
from .ui import RLTaskEnvWindow


@configclass
class RLTaskEnvCfg(BaseEnvCfg):
    """Configuration for a reinforcement learning environment."""

    # ui settings
    ui_window_class_type: type | None = RLTaskEnvWindow

    # general settings
    is_finite_horizon: bool = False
    """Whether the learning task is treated as a finite or infinite horizon problem for the agent.
    Defaults to False, which means the task is treated as an infinite horizon problem.

    This flag handles the subtleties of finite and infinite horizon tasks:

    * **Finite horizon**: no penalty or bootstrapping value is required by the agent for running out of time.
      However, the environment still needs to terminate the episode after the time limit is reached.
    * **Infinite horizon**: the agent needs to bootstrap the value of the state at the end of the episode.
      This is done by sending a time-limit (or truncated) done signal to the agent, which triggers this
      bootstrapping calculation.

    If True, then the environment is treated as a finite horizon problem and no time-out (or truncated)
    done signal is sent to the agent. If False, then the environment is treated as an infinite horizon
    problem and a time-out (or truncated) done signal is sent to the agent.

    Note:
        The base :class:`RLTaskEnv` class does not use this flag directly. It is used by the environment
        wrappers to determine what type of done signal to send to the corresponding learning agent.
    """

    episode_length_s: float = MISSING
    """Duration of an episode (in seconds).

    Based on the decimation rate and physics time step, the episode length is calculated as:

    .. code-block:: python

        episode_length_steps = ceil(episode_length_s / (decimation_rate * physics_time_step))

    For example, if the decimation rate is 10, the physics time step is 0.01, and the episode length
    is 10 seconds, then the episode length in steps is 100.
    """

    # environment settings
    rewards: object = MISSING
    """Reward settings.

    Please refer to the :class:`omni.isaac.orbit.managers.RewardManager` class for more details.
    """

    terminations: object = MISSING
    """Termination settings.

    Please refer to the :class:`omni.isaac.orbit.managers.TerminationManager` class for more details.
    """

    curriculum: object = MISSING
    """Curriculum settings.

    Please refer to the :class:`omni.isaac.orbit.managers.CurriculumManager` class for more details.
    """

    commands: object = MISSING
    """Command settings.

    Please refer to the :class:`omni.isaac.orbit.managers.CommandManager` class for more details.
    """
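

# Example (illustrative sketch): the episode-length arithmetic described in the ``episode_length_s``
# docstring, written out explicitly. The default numbers are assumptions matching the example above.


def _example_episode_length_steps(
    episode_length_s: float = 10.0, decimation: int = 10, physics_dt: float = 0.01
) -> int:
    """Returns ceil(episode_length_s / (decimation * physics_dt)); 100 for the default values."""
    import math

    return math.ceil(episode_length_s / (decimation * physics_dt))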
2,963
Python
34.710843
115
0.721228
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-package for environment definitions. Environments define the interface between the agent and the simulation. In the simplest case, the environment provides the agent with the current observations and executes the actions provided by the agent. However, the environment can also provide additional information such as the current reward, done flag, and information about the current episode. Based on these, there are two types of environments: * :class:`BaseEnv`: The base environment which only provides the agent with the current observations and executes the actions provided by the agent. * :class:`RLTaskEnv`: The RL task environment which besides the functionality of the base environment also provides additional Markov Decision Process (MDP) related information such as the current reward, done flag, and information. """ from . import mdp, ui from .base_env import BaseEnv, VecEnvObs from .base_env_cfg import BaseEnvCfg, ViewerCfg from .rl_task_env import RLTaskEnv, VecEnvStepReturn from .rl_task_env_cfg import RLTaskEnvCfg
1,177
Python
39.620688
80
0.796941
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base configuration of the environment.

This module defines the general configuration of the environment. It includes parameters for
configuring the environment instances, viewer settings, and simulation parameters.
"""

from __future__ import annotations

from dataclasses import MISSING
from typing import Literal

import omni.isaac.orbit.envs.mdp as mdp
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sim import SimulationCfg
from omni.isaac.orbit.utils import configclass

from .ui import BaseEnvWindow


@configclass
class ViewerCfg:
    """Configuration of the scene viewport camera."""

    eye: tuple[float, float, float] = (7.5, 7.5, 7.5)
    """Initial camera position (in m). Default is (7.5, 7.5, 7.5)."""

    lookat: tuple[float, float, float] = (0.0, 0.0, 0.0)
    """Initial camera target position (in m). Default is (0.0, 0.0, 0.0)."""

    cam_prim_path: str = "/OmniverseKit_Persp"
    """The camera prim path to record images from. Default is "/OmniverseKit_Persp",
    which is the default camera in the viewport.
    """

    resolution: tuple[int, int] = (1280, 720)
    """The resolution (width, height) of the camera specified using :attr:`cam_prim_path`.
    Default is (1280, 720).
    """

    origin_type: Literal["world", "env", "asset_root"] = "world"
    """The frame in which the camera position (eye) and target (lookat) are defined in. Default is "world".

    Available options are:

    * ``"world"``: The origin of the world.
    * ``"env"``: The origin of the environment defined by :attr:`env_index`.
    * ``"asset_root"``: The center of the asset defined by :attr:`asset_name` in environment :attr:`env_index`.
    """

    env_index: int = 0
    """The environment index for frame origin. Default is 0.

    This quantity is only effective if :attr:`origin_type` is set to "env" or "asset_root".
    """

    asset_name: str | None = None
    """The asset name in the interactive scene for the frame origin. Default is None.

    This quantity is only effective if :attr:`origin_type` is set to "asset_root".
    """


@configclass
class DefaultEventManagerCfg:
    """Configuration of the default event manager.

    This manager is used to reset the scene to a default state. The default state is specified
    by the scene configuration.
    """

    reset_scene_to_default = EventTerm(func=mdp.reset_scene_to_default, mode="reset")


@configclass
class BaseEnvCfg:
    """Base configuration of the environment."""

    # simulation settings
    viewer: ViewerCfg = ViewerCfg()
    """Viewer configuration. Default is ViewerCfg()."""

    sim: SimulationCfg = SimulationCfg()
    """Physics simulation configuration. Default is SimulationCfg()."""

    # ui settings
    ui_window_class_type: type | None = BaseEnvWindow
    """The class type of the UI window. Default is :class:`BaseEnvWindow`.

    If set to None, then no UI window is created.

    Note:
        If you want to make your own UI window, you can create a class that inherits from
        :class:`omni.isaac.orbit.envs.ui.base_env_window.BaseEnvWindow`. Then, you can set this
        attribute to your class type.
    """

    # general settings
    decimation: int = MISSING
    """Number of control action updates @ sim dt per policy dt.

    For instance, if the simulation dt is 0.01s and the policy dt is 0.1s, then the decimation is 10.
    This means that the control action is updated every 10 simulation steps.
    """

    # environment settings
    scene: InteractiveSceneCfg = MISSING
    """Scene settings.

    Please refer to the :class:`omni.isaac.orbit.scene.InteractiveSceneCfg` class for more details.
""" observations: object = MISSING """Observation space settings. Please refer to the :class:`omni.isaac.orbit.managers.ObservationManager` class for more details. """ actions: object = MISSING """Action space settings. Please refer to the :class:`omni.isaac.orbit.managers.ActionManager` class for more details. """ events: object = DefaultEventManagerCfg() """Event settings. Defaults to the basic configuration that resets the scene to its default state. Please refer to the :class:`omni.isaac.orbit.managers.EventManager` class for more details. """ randomization: object | None = None """Randomization settings. Default is None. .. deprecated:: 0.3.0 This attribute is deprecated and will be removed in v0.4.0. Please use the :attr:`events` attribute to configure the randomization settings. """
4,718
Python
32
111
0.693726
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/rl_task_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import gymnasium as gym import math import numpy as np import torch from collections.abc import Sequence from typing import Any, ClassVar from omni.isaac.version import get_version from omni.isaac.orbit.managers import CommandManager, CurriculumManager, RewardManager, TerminationManager from .base_env import BaseEnv, VecEnvObs from .rl_task_env_cfg import RLTaskEnvCfg VecEnvStepReturn = tuple[VecEnvObs, torch.Tensor, torch.Tensor, torch.Tensor, dict] """The environment signals processed at the end of each step. The tuple contains batched information for each sub-environment. The information is stored in the following order: 1. **Observations**: The observations from the environment. 2. **Rewards**: The rewards from the environment. 3. **Terminated Dones**: Whether the environment reached a terminal state, such as task success or robot falling etc. 4. **Timeout Dones**: Whether the environment reached a timeout state, such as end of max episode length. 5. **Extras**: A dictionary containing additional information from the environment. """ class RLTaskEnv(BaseEnv, gym.Env): """The superclass for reinforcement learning-based environments. This class inherits from :class:`BaseEnv` and implements the core functionality for reinforcement learning-based environments. It is designed to be used with any RL library. The class is designed to be used with vectorized environments, i.e., the environment is expected to be run in parallel with multiple sub-environments. The number of sub-environments is specified using the ``num_envs``. Each observation from the environment is a batch of observations for each sub- environments. The method :meth:`step` is also expected to receive a batch of actions for each sub-environment. While the environment itself is implemented as a vectorized environment, we do not inherit from :class:`gym.vector.VectorEnv`. This is mainly because the class adds various methods (for wait and asynchronous updates) which are not required. Additionally, each RL library typically has its own definition for a vectorized environment. Thus, to reduce complexity, we directly use the :class:`gym.Env` over here and leave it up to library-defined wrappers to take care of wrapping this environment for their agents. Note: For vectorized environments, it is recommended to **only** call the :meth:`reset` method once before the first call to :meth:`step`, i.e. after the environment is created. After that, the :meth:`step` function handles the reset of terminated sub-environments. This is because the simulator does not support resetting individual sub-environments in a vectorized environment. """ is_vector_env: ClassVar[bool] = True """Whether the environment is a vectorized environment.""" metadata: ClassVar[dict[str, Any]] = { "render_modes": [None, "human", "rgb_array"], "isaac_sim_version": get_version(), } """Metadata for the environment.""" cfg: RLTaskEnvCfg """Configuration for the environment.""" def __init__(self, cfg: RLTaskEnvCfg, render_mode: str | None = None, **kwargs): """Initialize the environment. Args: cfg: The configuration for the environment. render_mode: The render mode for the environment. Defaults to None, which is similar to ``"human"``. """ # initialize the base class to setup the scene. 
        super().__init__(cfg=cfg)
        # store the render mode
        self.render_mode = render_mode

        # initialize data and constants
        # -- counter for curriculum
        self.common_step_counter = 0
        # -- init buffers
        self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)

        # setup the action and observation spaces for Gym
        self._configure_gym_env_spaces()

        # perform events at the start of the simulation
        if "startup" in self.event_manager.available_modes:
            self.event_manager.apply(mode="startup")

        # print the environment information
        print("[INFO]: Completed setting up the environment...")

    """
    Properties.
    """

    @property
    def max_episode_length_s(self) -> float:
        """Maximum episode length in seconds."""
        return self.cfg.episode_length_s

    @property
    def max_episode_length(self) -> int:
        """Maximum episode length in environment steps."""
        return math.ceil(self.max_episode_length_s / self.step_dt)

    """
    Operations - Setup.
    """

    def load_managers(self):
        # note: this order is important since observation manager needs to know the command and action managers
        # and the reward manager needs to know the termination manager
        # -- command manager
        self.command_manager: CommandManager = CommandManager(self.cfg.commands, self)
        print("[INFO] Command Manager: ", self.command_manager)
        # call the parent class to load the managers for observations and actions.
        super().load_managers()
        # prepare the managers
        # -- termination manager
        self.termination_manager = TerminationManager(self.cfg.terminations, self)
        print("[INFO] Termination Manager: ", self.termination_manager)
        # -- reward manager
        self.reward_manager = RewardManager(self.cfg.rewards, self)
        print("[INFO] Reward Manager: ", self.reward_manager)
        # -- curriculum manager
        self.curriculum_manager = CurriculumManager(self.cfg.curriculum, self)
        print("[INFO] Curriculum Manager: ", self.curriculum_manager)

    """
    Operations - MDP
    """

    def step(self, action: torch.Tensor) -> VecEnvStepReturn:
        """Execute one time-step of the environment's dynamics and reset terminated environments.

        Unlike the :meth:`BaseEnv.step` method, this function performs the following operations:

        1. Process the actions.
        2. Perform physics stepping.
        3. Perform rendering if gui is enabled.
        4. Update the environment counters and compute the rewards and terminations.
        5. Reset the environments that terminated.
        6. Compute the observations.
        7. Return the observations, rewards, resets and extras.

        Args:
            action: The actions to apply on the environment. Shape is (num_envs, action_dim).

        Returns:
            A tuple containing the observations, rewards, resets (terminated and truncated) and extras.
""" # process actions self.action_manager.process_action(action) # perform physics stepping for _ in range(self.cfg.decimation): # set actions into buffers self.action_manager.apply_action() # set actions into simulator self.scene.write_data_to_sim() # simulate self.sim.step(render=False) # update buffers at sim dt self.scene.update(dt=self.physics_dt) # perform rendering if gui is enabled if self.sim.has_gui(): self.sim.render() # post-step: # -- update env counters (used for curriculum generation) self.episode_length_buf += 1 # step in current episode (per env) self.common_step_counter += 1 # total step (common for all envs) # -- check terminations self.reset_buf = self.termination_manager.compute() self.reset_terminated = self.termination_manager.terminated self.reset_time_outs = self.termination_manager.time_outs # -- reward computation self.reward_buf = self.reward_manager.compute(dt=self.step_dt) # -- reset envs that terminated/timed-out and log the episode information reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self._reset_idx(reset_env_ids) # -- update command self.command_manager.compute(dt=self.step_dt) # -- step interval events if "interval" in self.event_manager.available_modes: self.event_manager.apply(mode="interval", dt=self.step_dt) # -- compute observations # note: done after reset to get the correct observations for reset envs self.obs_buf = self.observation_manager.compute() # return observations, rewards, resets and extras return self.obs_buf, self.reward_buf, self.reset_terminated, self.reset_time_outs, self.extras def render(self) -> np.ndarray | None: """Run rendering without stepping through the physics. By convention, if mode is: - **human**: Render to the current display and return nothing. Usually for human consumption. - **rgb_array**: Return an numpy.ndarray with shape (x, y, 3), representing RGB values for an x-by-y pixel image, suitable for turning into a video. Returns: The rendered image as a numpy array if mode is "rgb_array". Otherwise, returns None. Raises: RuntimeError: If mode is set to "rgb_data" and simulation render mode does not support it. In this case, the simulation render mode must be set to ``RenderMode.PARTIAL_RENDERING`` or ``RenderMode.FULL_RENDERING``. NotImplementedError: If an unsupported rendering mode is specified. """ # run a rendering step of the simulator self.sim.render() # decide the rendering mode if self.render_mode == "human" or self.render_mode is None: return None elif self.render_mode == "rgb_array": # check that if any render could have happened if self.sim.render_mode.value < self.sim.RenderMode.PARTIAL_RENDERING.value: raise RuntimeError( f"Cannot render '{self.render_mode}' when the simulation render mode is" f" '{self.sim.render_mode.name}'. Please set the simulation render mode to:" f"'{self.sim.RenderMode.PARTIAL_RENDERING.name}' or '{self.sim.RenderMode.FULL_RENDERING.name}'." 
                )
            # create the annotator if it does not exist
            if not hasattr(self, "_rgb_annotator"):
                import omni.replicator.core as rep

                # create render product
                self._render_product = rep.create.render_product(
                    self.cfg.viewer.cam_prim_path, self.cfg.viewer.resolution
                )
                # create rgb annotator -- used to read data from the render product
                self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu")
                self._rgb_annotator.attach([self._render_product])
            # obtain the rgb data
            rgb_data = self._rgb_annotator.get_data()
            # convert to numpy array
            rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
            # return the rgb data
            # note: initially the renderer is warming up and returns empty data
            if rgb_data.size == 0:
                return np.zeros((self.cfg.viewer.resolution[1], self.cfg.viewer.resolution[0], 3), dtype=np.uint8)
            else:
                return rgb_data[:, :, :3]
        else:
            raise NotImplementedError(
                f"Render mode '{self.render_mode}' is not supported. Please use: {self.metadata['render_modes']}."
            )

    def close(self):
        if not self._is_closed:
            # destructor is order-sensitive
            del self.command_manager
            del self.reward_manager
            del self.termination_manager
            del self.curriculum_manager
            # call the parent class to close the environment
            super().close()

    """
    Helper functions.
    """

    def _configure_gym_env_spaces(self):
        """Configure the action and observation spaces for the Gym environment."""
        # observation space (unbounded since we don't impose any limits)
        self.single_observation_space = gym.spaces.Dict()
        for group_name, group_term_names in self.observation_manager.active_terms.items():
            # extract quantities about the group
            has_concatenated_obs = self.observation_manager.group_obs_concatenate[group_name]
            group_dim = self.observation_manager.group_obs_dim[group_name]
            group_term_dim = self.observation_manager.group_obs_term_dim[group_name]
            # check if group is concatenated or not
            # if not concatenated, then we need to add each term separately as a dictionary
            if has_concatenated_obs:
                self.single_observation_space[group_name] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=group_dim)
            else:
                self.single_observation_space[group_name] = gym.spaces.Dict({
                    term_name: gym.spaces.Box(low=-np.inf, high=np.inf, shape=term_dim)
                    for term_name, term_dim in zip(group_term_names, group_term_dim)
                })
        # action space (unbounded since we don't impose any limits)
        action_dim = sum(self.action_manager.action_term_dim)
        self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(action_dim,))

        # batch the spaces for vectorized environments
        self.observation_space = gym.vector.utils.batch_space(self.single_observation_space, self.num_envs)
        self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs)

    def _reset_idx(self, env_ids: Sequence[int]):
        """Reset environments based on specified indices.

        Args:
            env_ids: List of environment ids which must be reset
        """
        # update the curriculum for environments that need a reset
        self.curriculum_manager.compute(env_ids=env_ids)
        # reset the internal buffers of the scene elements
        self.scene.reset(env_ids)
        # apply events such as randomizations for environments that need a reset
        if "reset" in self.event_manager.available_modes:
            self.event_manager.apply(env_ids=env_ids, mode="reset")

        # iterate over all managers and reset them
        # this returns a dictionary of information which is stored in the extras
        # note: This is order-sensitive! Certain things need to be reset before others.
self.extras["log"] = dict() # -- observation manager info = self.observation_manager.reset(env_ids) self.extras["log"].update(info) # -- action manager info = self.action_manager.reset(env_ids) self.extras["log"].update(info) # -- rewards manager info = self.reward_manager.reset(env_ids) self.extras["log"].update(info) # -- curriculum manager info = self.curriculum_manager.reset(env_ids) self.extras["log"].update(info) # -- command manager info = self.command_manager.reset(env_ids) self.extras["log"].update(info) # -- event manager info = self.event_manager.reset(env_ids) self.extras["log"].update(info) # -- termination manager info = self.termination_manager.reset(env_ids) self.extras["log"].update(info) # reset the episode length buffer self.episode_length_buf[env_ids] = 0
15,635
Python
44.321739
117
0.649376
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import builtins
import torch
import warnings
from collections.abc import Sequence
from typing import Any, Dict

import carb
import omni.isaac.core.utils.torch as torch_utils

from omni.isaac.orbit.managers import ActionManager, EventManager, ObservationManager
from omni.isaac.orbit.scene import InteractiveScene
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils.timer import Timer

from .base_env_cfg import BaseEnvCfg
from .ui import ViewportCameraController

VecEnvObs = Dict[str, torch.Tensor | Dict[str, torch.Tensor]]
"""Observation returned by the environment.

The observations are stored in a dictionary. The keys are the group to which the observations belong.
This is useful for various setups such as reinforcement learning with asymmetric actor-critic or
multi-agent learning. For non-learning paradigms, this may include observations for different components
of a system.

Within each group, the observations can be stored either as a dictionary with keys as the names of each
observation term in the group, or a single tensor obtained from concatenating all the observation terms.

For example, for asymmetric actor-critic, the observation for the actor and the critic can be accessed
using the keys ``"policy"`` and ``"critic"`` respectively.

Note:
    By default, most learning frameworks deal with default and privileged observations in different ways.
    This handling must be taken care of by the wrapper around the :class:`RLTaskEnv` instance.

    For included frameworks (RSL-RL, RL-Games, skrl), the observations must have the key "policy". In case
    the key "critic" is also present, then the critic observations are taken from the "critic" group.
    Otherwise, they are the same as the "policy" group.

"""


class BaseEnv:
    """The base environment encapsulates the simulation scene and the environment managers.

    While a simulation scene or world comprises different components such as the robots, objects,
    and sensors (cameras, lidars, etc.), the environment is a higher-level abstraction that provides
    an interface for interacting with the simulation. The environment is comprised of the following
    components:

    * **Scene**: The scene manager that creates and manages the virtual world in which the robot operates.
      This includes defining the robot, static and dynamic objects, sensors, etc.
    * **Observation Manager**: The observation manager that generates observations from the current simulation
      state and the data gathered from the sensors. These observations may include privileged information that
      is not available to the robot in the real world. Additionally, user-defined terms can be added to process
      the observations and generate custom observations. For example, using a network to embed high-dimensional
      observations into a lower-dimensional space.
    * **Action Manager**: The action manager that processes the raw actions sent to the environment and
      converts them to low-level commands that are sent to the simulation. It can be configured to accept
      raw actions at different levels of abstraction. For example, in case of a robotic arm, the raw actions
      can be joint torques, joint positions, or end-effector poses. Similarly for a mobile base, it can be
      the joint torques, or the desired velocity of the floating base.
    * **Event Manager**: The event manager orchestrates operations triggered based on simulation events.
This includes resetting the scene to a default state, applying random pushes to the robot at different intervals of time, or randomizing properties such as mass and friction coefficients. This is useful for training and evaluating the robot in a variety of scenarios. The environment provides a unified interface for interacting with the simulation. However, it does not include task-specific quantities such as the reward function, or the termination conditions. These quantities are often specific to defining Markov Decision Processes (MDPs) while the base environment is agnostic to the MDP definition. The environment steps forward in time at a fixed time-step. The physics simulation is decimated at a lower time-step. This is to ensure that the simulation is stable. These two time-steps can be configured independently using the :attr:`BaseEnvCfg.decimation` (number of simulation steps per environment step) and the :attr:`BaseEnvCfg.sim.dt` (physics time-step) parameters. Based on these parameters, the environment time-step is computed as the product of the two. The two time-steps can be obtained by querying the :attr:`physics_dt` and the :attr:`step_dt` properties respectively. """ def __init__(self, cfg: BaseEnvCfg): """Initialize the environment. Args: cfg: The configuration object for the environment. Raises: RuntimeError: If a simulation context already exists. The environment must always create one since it configures the simulation context and controls the simulation. """ # store inputs to class self.cfg = cfg # initialize internal variables self._is_closed = False # create a simulation context to control the simulator if SimulationContext.instance() is None: self.sim = SimulationContext(self.cfg.sim) else: raise RuntimeError("Simulation context already exists. Cannot create a new one.") # print useful information print("[INFO]: Base environment:") print(f"\tEnvironment device : {self.device}") print(f"\tPhysics step-size : {self.physics_dt}") print(f"\tRendering step-size : {self.physics_dt * self.cfg.sim.substeps}") print(f"\tEnvironment step-size : {self.step_dt}") print(f"\tPhysics GPU pipeline : {self.cfg.sim.use_gpu_pipeline}") print(f"\tPhysics GPU simulation: {self.cfg.sim.physx.use_gpu}") # generate scene with Timer("[INFO]: Time taken for scene creation"): self.scene = InteractiveScene(self.cfg.scene) print("[INFO]: Scene manager: ", self.scene) # set up camera viewport controller # viewport is not available in other rendering modes so the function will throw a warning # FIXME: This needs to be fixed in the future when we unify the UI functionalities even for # non-rendering modes. if self.sim.render_mode >= self.sim.RenderMode.PARTIAL_RENDERING: self.viewport_camera_controller = ViewportCameraController(self, self.cfg.viewer) else: self.viewport_camera_controller = None # play the simulator to activate physics handles # note: this activates the physics simulation view that exposes TensorAPIs # note: when started in extension mode, first call sim.reset_async() and then initialize the managers if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False: print("[INFO]: Starting the simulation. This may take a few seconds. 
Please wait...") with Timer("[INFO]: Time taken for simulation start"): self.sim.reset() # add timeline event to load managers self.load_managers() # extend UI elements # we need to do this here after all the managers are initialized # this is because they dictate the sensors and commands right now if self.sim.has_gui() and self.cfg.ui_window_class_type is not None: self._window = self.cfg.ui_window_class_type(self, window_name="Orbit") else: # if no window, then we don't need to store the window self._window = None # allocate dictionary to store metrics self.extras = {} def __del__(self): """Cleanup for the environment.""" self.close() """ Properties. """ @property def num_envs(self) -> int: """The number of instances of the environment that are running.""" return self.scene.num_envs @property def physics_dt(self) -> float: """The physics time-step (in s). This is the lowest time-decimation at which the simulation is happening. """ return self.cfg.sim.dt @property def step_dt(self) -> float: """The environment stepping time-step (in s). This is the time-step at which the environment steps forward. """ return self.cfg.sim.dt * self.cfg.decimation @property def device(self): """The device on which the environment is running.""" return self.sim.device """ Operations - Setup. """ def load_managers(self): """Load the managers for the environment. This function is responsible for creating the various managers (action, observation, events, etc.) for the environment. Since the managers require access to physics handles, they can only be created after the simulator is reset (i.e. played for the first time). .. note:: In case of standalone application (when running simulator from Python), the function is called automatically when the class is initialized. However, in case of extension mode, the user must call this function manually after the simulator is reset. This is because the simulator is only reset when the user calls :meth:`SimulationContext.reset_async` and it isn't possible to call async functions in the constructor. """ # check the configs if self.cfg.randomization is not None: msg = ( "The 'randomization' attribute is deprecated and will be removed in a future release. " "Please use the 'events' attribute to configure the randomization settings." ) warnings.warn(msg, category=DeprecationWarning) carb.log_warn(msg) # set the randomization as events (for backward compatibility) self.cfg.events = self.cfg.randomization # prepare the managers # -- action manager self.action_manager = ActionManager(self.cfg.actions, self) print("[INFO] Action Manager: ", self.action_manager) # -- observation manager self.observation_manager = ObservationManager(self.cfg.observations, self) print("[INFO] Observation Manager:", self.observation_manager) # -- event manager self.event_manager = EventManager(self.cfg.events, self) print("[INFO] Event Manager: ", self.event_manager) """ Operations - MDP. """ def reset(self, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[VecEnvObs, dict]: """Resets all the environments and returns observations. Args: seed: The seed to use for randomization. Defaults to None, in which case the seed is not set. options: Additional information to specify how the environment is reset. Defaults to None. Note: This argument is used for compatibility with Gymnasium environment definition. Returns: A tuple containing the observations and extras. 
""" # set the seed if seed is not None: self.seed(seed) # reset state of scene indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device) self._reset_idx(indices) # return observations return self.observation_manager.compute(), self.extras def step(self, action: torch.Tensor) -> VecEnvObs: """Execute one time-step of the environment's dynamics. The environment steps forward at a fixed time-step, while the physics simulation is decimated at a lower time-step. This is to ensure that the simulation is stable. These two time-steps can be configured independently using the :attr:`BaseEnvCfg.decimation` (number of simulation steps per environment step) and the :attr:`BaseEnvCfg.physics_dt` (physics time-step). Based on these parameters, the environment time-step is computed as the product of the two. Args: action: The actions to apply on the environment. Shape is (num_envs, action_dim). Returns: A tuple containing the observations and extras. """ # process actions self.action_manager.process_action(action) # perform physics stepping for _ in range(self.cfg.decimation): # set actions into buffers self.action_manager.apply_action() # set actions into simulator self.scene.write_data_to_sim() # simulate self.sim.step(render=False) # update buffers at sim dt self.scene.update(dt=self.physics_dt) # perform rendering if gui is enabled if self.sim.has_gui(): self.sim.render() # post-step: step interval event if "interval" in self.event_manager.available_modes: self.event_manager.apply(mode="interval", dt=self.step_dt) # return observations and extras return self.observation_manager.compute(), self.extras @staticmethod def seed(seed: int = -1) -> int: """Set the seed for the environment. Args: seed: The seed for random generator. Defaults to -1. Returns: The seed used for random generator. """ # set seed for replicator try: import omni.replicator.core as rep rep.set_global_seed(seed) except ModuleNotFoundError: pass # set seed for torch and other libraries return torch_utils.set_seed(seed) def close(self): """Cleanup for the environment.""" if not self._is_closed: # destructor is order-sensitive del self.action_manager del self.observation_manager del self.event_manager del self.scene del self.viewport_camera_controller # clear callbacks and instance self.sim.clear_all_callbacks() self.sim.clear_instance() # destroy the window if self._window is not None: self._window = None # update closing status self._is_closed = True """ Helper functions. """ def _reset_idx(self, env_ids: Sequence[int]): """Reset environments based on specified indices. Args: env_ids: List of environment ids which must be reset """ # reset the internal buffers of the scene elements self.scene.reset(env_ids) # apply events such as randomizations for environments that need a reset if "reset" in self.event_manager.available_modes: self.event_manager.apply(env_ids=env_ids, mode="reset") # iterate over all managers and reset them # this returns a dictionary of information which is stored in the extras # note: This is order-sensitive! Certain things need be reset before others. self.extras["log"] = dict() # -- observation manager info = self.observation_manager.reset(env_ids) self.extras["log"].update(info) # -- action manager info = self.action_manager.reset(env_ids) self.extras["log"].update(info) # -- event manager info = self.event_manager.reset(env_ids) self.extras["log"].update(info)
15,816
Python
42.936111
118
0.669512
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/rl_task_env_window.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from typing import TYPE_CHECKING from .base_env_window import BaseEnvWindow if TYPE_CHECKING: from ..rl_task_env import RLTaskEnv class RLTaskEnvWindow(BaseEnvWindow): """Window manager for the RL environment. On top of the basic environment window, this class adds controls for the RL environment. This includes visualization of the command manager. """ def __init__(self, env: RLTaskEnv, window_name: str = "Orbit"): """Initialize the window. Args: env: The environment object. window_name: The name of the window. Defaults to "Orbit". """ # initialize base window super().__init__(env, window_name) # add custom UI elements with self.ui_window_elements["main_vstack"]: with self.ui_window_elements["debug_frame"]: with self.ui_window_elements["debug_vstack"]: # add command manager visualization self._create_debug_vis_ui_element("commands", self.env.command_manager)
1,210
Python
30.051281
92
0.64876
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/viewport_camera_controller.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import copy import numpy as np import torch import weakref from collections.abc import Sequence from typing import TYPE_CHECKING import omni.kit.app import omni.timeline if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv, ViewerCfg class ViewportCameraController: """This class handles controlling the camera associated with a viewport in the simulator. It can be used to set the viewpoint camera to track different origin types: - **world**: the center of the world (static) - **env**: the center of an environment (static) - **asset_root**: the root of an asset in the scene (e.g. tracking a robot moving in the scene) On creation, the camera is set to track the origin type specified in the configuration. For the :attr:`asset_root` origin type, the camera is updated at each rendering step to track the asset's root position. For this, it registers a callback to the post update event stream from the simulation app. """ def __init__(self, env: BaseEnv, cfg: ViewerCfg): """Initialize the ViewportCameraController. Args: env: The environment. cfg: The configuration for the viewport camera controller. Raises: ValueError: If origin type is configured to be "env" but :attr:`cfg.env_index` is out of bounds. ValueError: If origin type is configured to be "asset_root" but :attr:`cfg.asset_name` is unset. """ # store inputs self._env = env self._cfg = copy.deepcopy(cfg) # cast viewer eye and look-at to numpy arrays self.default_cam_eye = np.array(self._cfg.eye) self.default_cam_lookat = np.array(self._cfg.lookat) # set the camera origins if self.cfg.origin_type == "env": # check that the env_index is within bounds self.set_view_env_index(self.cfg.env_index) # set the camera origin to the center of the environment self.update_view_to_env() elif self.cfg.origin_type == "asset_root": # note: we do not yet update camera for tracking an asset origin, as the asset may not yet be # in the scene when this is called. Instead, we subscribe to the post update event to update the camera # at each rendering step. if self.cfg.asset_name is None: raise ValueError(f"No asset name provided for viewer with origin type: '{self.cfg.origin_type}'.") else: # set the camera origin to the center of the world self.update_view_to_world() # subscribe to post update event so that camera view can be updated at each rendering step app_interface = omni.kit.app.get_app_interface() app_event_stream = app_interface.get_post_update_event_stream() self._viewport_camera_update_handle = app_event_stream.create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._update_tracking_callback(event) ) def __del__(self): """Unsubscribe from the callback.""" # use hasattr to handle case where __init__ has not completed before __del__ is called if hasattr(self, "_viewport_camera_update_handle") and self._viewport_camera_update_handle is not None: self._viewport_camera_update_handle.unsubscribe() self._viewport_camera_update_handle = None """ Properties """ @property def cfg(self) -> ViewerCfg: """The configuration for the viewer.""" return self._cfg """ Public Functions """ def set_view_env_index(self, env_index: int): """Sets the environment index for the camera view. Args: env_index: The index of the environment to set the camera view to. Raises: ValueError: If the environment index is out of bounds. It should be between 0 and num_envs - 1. 
""" # check that the env_index is within bounds if env_index < 0 or env_index >= self._env.num_envs: raise ValueError( f"Out of range value for attribute 'env_index': {env_index}." f" Expected a value between 0 and {self._env.num_envs - 1} for the current environment." ) # update the environment index self.cfg.env_index = env_index # update the camera view if the origin is set to env type (since, the camera view is static) # note: for assets, the camera view is updated at each rendering step if self.cfg.origin_type == "env": self.update_view_to_env() def update_view_to_world(self): """Updates the viewer's origin to the origin of the world which is (0, 0, 0).""" # set origin type to world self.cfg.origin_type = "world" # update the camera origins self.viewer_origin = torch.zeros(3) # update the camera view self.update_view_location() def update_view_to_env(self): """Updates the viewer's origin to the origin of the selected environment.""" # set origin type to world self.cfg.origin_type = "env" # update the camera origins self.viewer_origin = self._env.scene.env_origins[self.cfg.env_index] # update the camera view self.update_view_location() def update_view_to_asset_root(self, asset_name: str): """Updates the viewer's origin based upon the root of an asset in the scene. Args: asset_name: The name of the asset in the scene. The name should match the name of the asset in the scene. Raises: ValueError: If the asset is not in the scene. """ # check if the asset is in the scene if self.cfg.asset_name != asset_name: asset_entities = [*self._env.scene.rigid_objects.keys(), *self._env.scene.articulations.keys()] if asset_name not in asset_entities: raise ValueError(f"Asset '{asset_name}' is not in the scene. Available entities: {asset_entities}.") # update the asset name self.cfg.asset_name = asset_name # set origin type to asset_root self.cfg.origin_type = "asset_root" # update the camera origins self.viewer_origin = self._env.scene[self.cfg.asset_name].data.root_pos_w[self.cfg.env_index] # update the camera view self.update_view_location() def update_view_location(self, eye: Sequence[float] | None = None, lookat: Sequence[float] | None = None): """Updates the camera view pose based on the current viewer origin and the eye and lookat positions. Args: eye: The eye position of the camera. If None, the current eye position is used. lookat: The lookat position of the camera. If None, the current lookat position is used. """ # store the camera view pose for later use if eye is not None: self.default_cam_eye = np.asarray(eye) if lookat is not None: self.default_cam_lookat = np.asarray(lookat) # set the camera locations viewer_origin = self.viewer_origin.detach().cpu().numpy() cam_eye = viewer_origin + self.default_cam_eye cam_target = viewer_origin + self.default_cam_lookat # set the camera view self._env.sim.set_camera_view(eye=cam_eye, target=cam_target) """ Private Functions """ def _update_tracking_callback(self, event): """Updates the camera view at each rendering step.""" # update the camera view if the origin is set to asset_root # in other cases, the camera view is static and does not need to be updated continuously if self.cfg.origin_type == "asset_root" and self.cfg.asset_name is not None: self.update_view_to_asset_root(self.cfg.asset_name)
8,046
Python
40.6943
116
0.637087
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module providing UI window implementation for environments. The UI elements are used to control the environment and visualize the state of the environment. This includes functionalities such as tracking a robot in the simulation, toggling different debug visualization tools, and other user-defined functionalities. """ from .base_env_window import BaseEnvWindow from .rl_task_env_window import RLTaskEnvWindow from .viewport_camera_controller import ViewportCameraController
608
Python
37.062498
95
0.814145
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/base_env_window.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import asyncio
import os
import weakref
from datetime import datetime
from typing import TYPE_CHECKING

import omni.isaac.ui.ui_utils as ui_utils
import omni.kit.app
import omni.kit.commands
import omni.ui
import omni.usd
from omni.kit.window.extensions import SimpleCheckBox
from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics

if TYPE_CHECKING:
    from ..base_env import BaseEnv


class BaseEnvWindow:
    """Window manager for the basic environment.

    This class creates a window that is used to control the environment. The window
    contains controls for rendering, debug visualization, and other environment-specific
    UI elements.

    Users can add their own UI elements to the window by using the `with` context manager.
    This can be done either by inheriting the class or by using the `env.window` object
    directly from the standalone execution script.

    Example for adding a UI element from the standalone execution script:
        >>> with env.window.ui_window_elements["main_vstack"]:
        >>>     ui.Label("My UI element")

    """

    def __init__(self, env: BaseEnv, window_name: str = "Orbit"):
        """Initialize the window.

        Args:
            env: The environment object.
            window_name: The name of the window. Defaults to "Orbit".
        """
        # store inputs
        self.env = env
        # prepare the list of assets that can be followed by the viewport camera
        # note that the first two options are "World" and "Env" which are special cases
        self._viewer_assets_options = [
            "World",
            "Env",
            *self.env.scene.rigid_objects.keys(),
            *self.env.scene.articulations.keys(),
        ]

        print("Creating window for environment.")
        # create window for UI
        self.ui_window = omni.ui.Window(
            window_name, width=400, height=500, visible=True, dock_preference=omni.ui.DockPreference.RIGHT_TOP
        )
        # dock next to properties window
        asyncio.ensure_future(self._dock_window(window_title=self.ui_window.title))

        # keep a dictionary of stacks so that child environments can add their own UI elements
        # this can be done by using the `with` context manager
        self.ui_window_elements = dict()
        # create main frame
        self.ui_window_elements["main_frame"] = self.ui_window.frame
        with self.ui_window_elements["main_frame"]:
            # create main stack
            self.ui_window_elements["main_vstack"] = omni.ui.VStack(spacing=5, height=0)
            with self.ui_window_elements["main_vstack"]:
                # create collapsable frame for simulation
                self._build_sim_frame()
                # create collapsable frame for viewer
                self._build_viewer_frame()
                # create collapsable frame for debug visualization
                self._build_debug_vis_frame()

    def __del__(self):
        """Destructor for the window."""
        # destroy the window
        if self.ui_window is not None:
            self.ui_window.visible = False
            self.ui_window.destroy()
            self.ui_window = None

    """
    Build sub-sections of the UI.
""" def _build_sim_frame(self): """Builds the sim-related controls frame for the UI.""" # create collapsable frame for controls self.ui_window_elements["sim_frame"] = omni.ui.CollapsableFrame( title="Simulation Settings", width=omni.ui.Fraction(1), height=0, collapsed=False, style=ui_utils.get_style(), horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with self.ui_window_elements["sim_frame"]: # create stack for controls self.ui_window_elements["sim_vstack"] = omni.ui.VStack(spacing=5, height=0) with self.ui_window_elements["sim_vstack"]: # create rendering mode dropdown render_mode_cfg = { "label": "Rendering Mode", "type": "dropdown", "default_val": self.env.sim.render_mode.value, "items": [member.name for member in self.env.sim.RenderMode if member.value >= 0], "tooltip": "Select a rendering mode\n" + self.env.sim.RenderMode.__doc__, "on_clicked_fn": lambda value: self.env.sim.set_render_mode(self.env.sim.RenderMode[value]), } self.ui_window_elements["render_dropdown"] = ui_utils.dropdown_builder(**render_mode_cfg) # create animation recording box record_animate_cfg = { "label": "Record Animation", "type": "state_button", "a_text": "START", "b_text": "STOP", "tooltip": "Record the animation of the scene. Only effective if fabric is disabled.", "on_clicked_fn": lambda value: self._toggle_recording_animation_fn(value), } self.ui_window_elements["record_animation"] = ui_utils.state_btn_builder(**record_animate_cfg) # disable the button if fabric is not enabled self.ui_window_elements["record_animation"].enabled = not self.env.sim.is_fabric_enabled() def _build_viewer_frame(self): """Build the viewer-related control frame for the UI.""" # create collapsable frame for viewer self.ui_window_elements["viewer_frame"] = omni.ui.CollapsableFrame( title="Viewer Settings", width=omni.ui.Fraction(1), height=0, collapsed=False, style=ui_utils.get_style(), horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with self.ui_window_elements["viewer_frame"]: # create stack for controls self.ui_window_elements["viewer_vstack"] = omni.ui.VStack(spacing=5, height=0) with self.ui_window_elements["viewer_vstack"]: # create a number slider to move to environment origin # NOTE: slider is 1-indexed, whereas the env index is 0-indexed viewport_origin_cfg = { "label": "Environment Index", "type": "button", "default_val": self.env.cfg.viewer.env_index + 1, "min": 1, "max": self.env.num_envs, "tooltip": "The environment index to follow. 
Only effective if follow mode is not 'World'.", } self.ui_window_elements["viewer_env_index"] = ui_utils.int_builder(**viewport_origin_cfg) # create a number slider to move to environment origin self.ui_window_elements["viewer_env_index"].add_value_changed_fn(self._set_viewer_env_index_fn) # create a tracker for the camera location viewer_follow_cfg = { "label": "Follow Mode", "type": "dropdown", "default_val": 0, "items": [name.replace("_", " ").title() for name in self._viewer_assets_options], "tooltip": "Select the viewport camera following mode.", "on_clicked_fn": self._set_viewer_origin_type_fn, } self.ui_window_elements["viewer_follow"] = ui_utils.dropdown_builder(**viewer_follow_cfg) # add viewer default eye and lookat locations self.ui_window_elements["viewer_eye"] = ui_utils.xyz_builder( label="Camera Eye", tooltip="Modify the XYZ location of the viewer eye.", default_val=self.env.cfg.viewer.eye, step=0.1, on_value_changed_fn=[self._set_viewer_location_fn] * 3, ) self.ui_window_elements["viewer_lookat"] = ui_utils.xyz_builder( label="Camera Target", tooltip="Modify the XYZ location of the viewer target.", default_val=self.env.cfg.viewer.lookat, step=0.1, on_value_changed_fn=[self._set_viewer_location_fn] * 3, ) def _build_debug_vis_frame(self): """Builds the debug visualization frame for various scene elements. This function inquires the scene for all elements that have a debug visualization implemented and creates a checkbox to toggle the debug visualization for each element that has it implemented. If the element does not have a debug visualization implemented, a label is created instead. """ # create collapsable frame for debug visualization self.ui_window_elements["debug_frame"] = omni.ui.CollapsableFrame( title="Scene Debug Visualization", width=omni.ui.Fraction(1), height=0, collapsed=False, style=ui_utils.get_style(), horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with self.ui_window_elements["debug_frame"]: # create stack for debug visualization self.ui_window_elements["debug_vstack"] = omni.ui.VStack(spacing=5, height=0) with self.ui_window_elements["debug_vstack"]: elements = [ self.env.scene.terrain, *self.env.scene.rigid_objects.values(), *self.env.scene.articulations.values(), *self.env.scene.sensors.values(), ] names = [ "terrain", *self.env.scene.rigid_objects.keys(), *self.env.scene.articulations.keys(), *self.env.scene.sensors.keys(), ] # create one for the terrain for elem, name in zip(elements, names): if elem is not None: self._create_debug_vis_ui_element(name, elem) """ Custom callbacks for UI elements. 
""" def _toggle_recording_animation_fn(self, value: bool): """Toggles the animation recording.""" if value: # log directory to save the recording if not hasattr(self, "animation_log_dir"): # create a new log directory log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") self.animation_log_dir = os.path.join(os.getcwd(), "recordings", log_dir) # start the recording _ = omni.kit.commands.execute( "StartRecording", target_paths=[("/World", True)], live_mode=True, use_frame_range=False, start_frame=0, end_frame=0, use_preroll=False, preroll_frame=0, record_to="FILE", fps=0, apply_root_anim=False, increment_name=True, record_folder=self.animation_log_dir, take_name="TimeSample", ) else: # stop the recording _ = omni.kit.commands.execute("StopRecording") # save the current stage stage = omni.usd.get_context().get_stage() source_layer = stage.GetRootLayer() # output the stage to a file stage_usd_path = os.path.join(self.animation_log_dir, "Stage.usd") source_prim_path = "/" # creates empty anon layer temp_layer = Sdf.Find(stage_usd_path) if temp_layer is None: temp_layer = Sdf.Layer.CreateNew(stage_usd_path) temp_stage = Usd.Stage.Open(temp_layer) # update stage data UsdGeom.SetStageUpAxis(temp_stage, UsdGeom.GetStageUpAxis(stage)) UsdGeom.SetStageMetersPerUnit(temp_stage, UsdGeom.GetStageMetersPerUnit(stage)) # copy the prim Sdf.CreatePrimInLayer(temp_layer, source_prim_path) Sdf.CopySpec(source_layer, source_prim_path, temp_layer, source_prim_path) # set the default prim temp_layer.defaultPrim = Sdf.Path(source_prim_path).name # remove all physics from the stage for prim in temp_stage.TraverseAll(): # skip if the prim is an instance if prim.IsInstanceable(): continue # if prim has articulation then disable it if prim.HasAPI(UsdPhysics.ArticulationRootAPI): prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI) # if prim has rigid body then disable it if prim.HasAPI(UsdPhysics.RigidBodyAPI): prim.RemoveAPI(UsdPhysics.RigidBodyAPI) prim.RemoveAPI(PhysxSchema.PhysxRigidBodyAPI) # if prim is a joint type then disable it if prim.IsA(UsdPhysics.Joint): prim.GetAttribute("physics:jointEnabled").Set(False) # resolve all paths relative to layer path omni.usd.resolve_paths(source_layer.identifier, temp_layer.identifier) # save the stage temp_layer.Save() # print the path to the saved stage print("Recording completed.") print(f"\tSaved recorded stage to : {stage_usd_path}") print(f"\tSaved recorded animation to: {os.path.join(self.animation_log_dir, 'TimeSample_tk001.usd')}") print("\nTo play the animation, check the instructions in the following link:") print( "\thttps://docs.omniverse.nvidia.com/extensions/latest/ext_animation_stage-recorder.html#using-the-captured-timesamples" ) print("\n") # reset the log directory self.animation_log_dir = None def _set_viewer_origin_type_fn(self, value: str): """Sets the origin of the viewport's camera. This is based on the drop-down menu in the UI.""" # Extract the viewport camera controller from environment vcc = self.env.viewport_camera_controller if vcc is None: raise ValueError("Viewport camera controller is not initialized! 
Please check the rendering mode.") # Based on origin type, update the camera view if value == "World": vcc.update_view_to_world() elif value == "Env": vcc.update_view_to_env() else: # find which index the asset is fancy_names = [name.replace("_", " ").title() for name in self._viewer_assets_options] # store the desired env index viewer_asset_name = self._viewer_assets_options[fancy_names.index(value)] # update the camera view vcc.update_view_to_asset_root(viewer_asset_name) def _set_viewer_location_fn(self, model: omni.ui.SimpleFloatModel): """Sets the viewport camera location based on the UI.""" # access the viewport camera controller (for brevity) vcc = self.env.viewport_camera_controller if vcc is None: raise ValueError("Viewport camera controller is not initialized! Please check the rendering mode.") # obtain the camera locations and set them in the viewpoint camera controller eye = [self.ui_window_elements["viewer_eye"][i].get_value_as_float() for i in range(3)] lookat = [self.ui_window_elements["viewer_lookat"][i].get_value_as_float() for i in range(3)] # update the camera view vcc.update_view_location(eye, lookat) def _set_viewer_env_index_fn(self, model: omni.ui.SimpleIntModel): """Sets the environment index and updates the camera if in 'env' origin mode.""" # access the viewport camera controller (for brevity) vcc = self.env.viewport_camera_controller if vcc is None: raise ValueError("Viewport camera controller is not initialized! Please check the rendering mode.") # store the desired env index, UI is 1-indexed vcc.set_view_env_index(model.as_int - 1) """ Helper functions - UI building. """ def _create_debug_vis_ui_element(self, name: str, elem: object): """Create a checkbox for toggling debug visualization for the given element.""" with omni.ui.HStack(): # create the UI element text = ( "Toggle debug visualization." if elem.has_debug_vis_implementation else "Debug visualization not implemented." ) omni.ui.Label( name.replace("_", " ").title(), width=ui_utils.LABEL_WIDTH - 12, alignment=omni.ui.Alignment.LEFT_CENTER, tooltip=text, ) self.ui_window_elements[f"{name}_cb"] = SimpleCheckBox( model=omni.ui.SimpleBoolModel(), enabled=elem.has_debug_vis_implementation, checked=elem.cfg.debug_vis, on_checked_fn=lambda value, e=weakref.proxy(elem): e.set_debug_vis(value), ) ui_utils.add_line_rect_flourish() async def _dock_window(self, window_title: str): """Docks the custom UI window to the property window.""" # wait for the window to be created for _ in range(5): if omni.ui.Workspace.get_window(window_title): break await self.env.sim.app.next_update_async() # dock next to properties window custom_window = omni.ui.Workspace.get_window(window_title) property_window = omni.ui.Workspace.get_window("Property") if custom_window and property_window: custom_window.dock_in(property_window, omni.ui.DockPosition.SAME, 1.0) custom_window.focus()
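

# ---------------------------------------------------------------------------
# Hedged extension sketch (illustrative addition, not part of the original
# class): task-specific windows typically subclass ``BaseEnvWindow`` and append
# their own frames to the main stack built in ``__init__``. The frame title and
# label text below are placeholders chosen only for illustration.
class _ExampleEnvWindow(BaseEnvWindow):
    def __init__(self, env: BaseEnv, window_name: str = "Orbit"):
        super().__init__(env, window_name)
        # extend the existing main stack with a custom collapsable frame
        with self.ui_window_elements["main_vstack"]:
            self.ui_window_elements["custom_frame"] = omni.ui.CollapsableFrame(
                title="Custom Controls", height=0, collapsed=False, style=ui_utils.get_style()
            )
            with self.ui_window_elements["custom_frame"]:
                omni.ui.Label("Task-specific controls go here.")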
18,520
Python
45.535176
136
0.582397
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module with implementation of manager terms. The functions can be provided to different managers that are responsible for the different aspects of the MDP. These include the observation, reward, termination, actions, events and curriculum managers. The terms are defined under the ``envs`` module because they are used to define the environment. However, they are not part of the environment directly, but are used to define the environment through their managers. """ from .actions import * # noqa: F401, F403 from .commands import * # noqa: F401, F403 from .curriculums import * # noqa: F401, F403 from .events import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403 from .terminations import * # noqa: F401, F403
918
Python
35.759999
81
0.752723
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/curriculums.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Common functions that can be used to create curriculum for the learning environment.

The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""

from __future__ import annotations

from collections.abc import Sequence
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def modify_reward_weight(env: RLTaskEnv, env_ids: Sequence[int], term_name: str, weight: float, num_steps: int):
    """Curriculum that modifies a reward weight after a given number of steps.

    Args:
        env: The learning environment.
        env_ids: Not used since all environments are affected.
        term_name: The name of the reward term.
        weight: The weight of the reward term.
        num_steps: The number of steps after which the change should be applied.
    """
    if env.common_step_counter > num_steps:
        # obtain term settings
        term_cfg = env.reward_manager.get_term_cfg(term_name)
        # update term settings
        term_cfg.weight = weight
        env.reward_manager.set_term_cfg(term_name, term_cfg)
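

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the original module):
# the curriculum above is registered through a ``CurriculumTermCfg``. The term
# name "action_rate", the target weight and the step count are placeholder
# values chosen only for illustration.
def _example_curriculum_term_cfg():
    from omni.isaac.orbit.managers import CurriculumTermCfg

    return CurriculumTermCfg(
        func=modify_reward_weight,
        params={"term_name": "action_rate", "weight": -0.05, "num_steps": 10000},
    )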
1,285
Python
33.756756
112
0.713619
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to enable reward functions. The functions can be passed to the :class:`omni.isaac.orbit.managers.RewardTermCfg` object to include the reward introduced by the function. """ from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation, RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers.manager_base import ManagerTermBase from omni.isaac.orbit.managers.manager_term_cfg import RewardTermCfg from omni.isaac.orbit.sensors import ContactSensor if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv """ General. """ def is_alive(env: RLTaskEnv) -> torch.Tensor: """Reward for being alive.""" return (~env.termination_manager.terminated).float() def is_terminated(env: RLTaskEnv) -> torch.Tensor: """Penalize terminated episodes that don't correspond to episodic timeouts.""" return env.termination_manager.terminated.float() class is_terminated_term(ManagerTermBase): """Penalize termination for specific terms that don't correspond to episodic timeouts. The parameters are as follows: * attr:`term_keys`: The termination terms to penalize. This can be a string, a list of strings or regular expressions. Default is ".*" which penalizes all terminations. The reward is computed as the sum of the termination terms that are not episodic timeouts. This means that the reward is 0 if the episode is terminated due to an episodic timeout. Otherwise, if two termination terms are active, the reward is 2. """ def __init__(self, cfg: RewardTermCfg, env: RLTaskEnv): # initialize the base class super().__init__(cfg, env) # find and store the termination terms term_keys = cfg.params.get("term_keys", ".*") self._term_names = env.termination_manager.find_terms(term_keys) def __call__(self, env: RLTaskEnv, term_keys: str | list[str] = ".*") -> torch.Tensor: # Return the unweighted reward for the termination terms reset_buf = torch.zeros(env.num_envs, device=env.device) for term in self._term_names: # Sums over terminations term values to account for multiple terminations in the same step reset_buf += env.termination_manager.get_term(term) return (reset_buf * (~env.termination_manager.time_outs)).float() """ Root penalties. """ def lin_vel_z_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize z-axis base linear velocity using L2-kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return torch.square(asset.data.root_lin_vel_b[:, 2]) def ang_vel_xy_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize xy-axis base angular velocity using L2-kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return torch.sum(torch.square(asset.data.root_ang_vel_b[:, :2]), dim=1) def flat_orientation_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize non-flat base orientation using L2-kernel. This is computed by penalizing the xy-components of the projected gravity vector. 
""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return torch.sum(torch.square(asset.data.projected_gravity_b[:, :2]), dim=1) def base_height_l2( env: RLTaskEnv, target_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Penalize asset height from its target using L2-kernel. Note: Currently, it assumes a flat terrain, i.e. the target height is in the world frame. """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] # TODO: Fix this for rough-terrain. return torch.square(asset.data.root_pos_w[:, 2] - target_height) def body_lin_acc_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize the linear acceleration of bodies using L2-kernel.""" asset: Articulation = env.scene[asset_cfg.name] return torch.sum(torch.norm(asset.data.body_lin_acc_w[:, asset_cfg.body_ids, :], dim=-1), dim=1) """ Joint penalties. """ def joint_torques_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize joint torques applied on the articulation using L2-kernel. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint torques contribute to the L2 norm. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return torch.sum(torch.square(asset.data.applied_torque[:, asset_cfg.joint_ids]), dim=1) def joint_vel_l1(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize joint velocities on the articulation using an L1-kernel.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return torch.sum(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1) def joint_vel_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize joint velocities on the articulation using L1-kernel. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint velocities contribute to the L1 norm. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return torch.sum(torch.square(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1) def joint_acc_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize joint accelerations on the articulation using L2-kernel. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint accelerations contribute to the L2 norm. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return torch.sum(torch.square(asset.data.joint_acc[:, asset_cfg.joint_ids]), dim=1) def joint_deviation_l1(env, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize joint positions that deviate from the default one.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints angle = asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids] return torch.sum(torch.abs(angle), dim=1) def joint_pos_limits(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize joint positions if they cross the soft limits. This is computed as a sum of the absolute value of the difference between the joint position and the soft limits. 
""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints out_of_limits = -( asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0] ).clip(max=0.0) out_of_limits += ( asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1] ).clip(min=0.0) return torch.sum(out_of_limits, dim=1) def joint_vel_limits( env: RLTaskEnv, soft_ratio: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Penalize joint velocities if they cross the soft limits. This is computed as a sum of the absolute value of the difference between the joint velocity and the soft limits. Args: soft_ratio: The ratio of the soft limits to be used. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints out_of_limits = ( torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) - asset.data.soft_joint_vel_limits[:, asset_cfg.joint_ids] * soft_ratio ) # clip to max error = 1 rad/s per joint to avoid huge penalties out_of_limits = out_of_limits.clip_(min=0.0, max=1.0) return torch.sum(out_of_limits, dim=1) """ Action penalties. """ def applied_torque_limits(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize applied torques if they cross the limits. This is computed as a sum of the absolute value of the difference between the applied torques and the limits. .. caution:: Currently, this only works for explicit actuators since we manually compute the applied torques. For implicit actuators, we currently cannot retrieve the applied torques from the physics engine. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints # TODO: We need to fix this to support implicit joints. out_of_limits = torch.abs( asset.data.applied_torque[:, asset_cfg.joint_ids] - asset.data.computed_torque[:, asset_cfg.joint_ids] ) return torch.sum(out_of_limits, dim=1) def action_rate_l2(env: RLTaskEnv) -> torch.Tensor: """Penalize the rate of change of the actions using L2-kernel.""" return torch.sum(torch.square(env.action_manager.action - env.action_manager.prev_action), dim=1) def action_l2(env: RLTaskEnv) -> torch.Tensor: """Penalize the actions using L2-kernel.""" return torch.sum(torch.square(env.action_manager.action), dim=1) """ Contact sensor. 
""" def undesired_contacts(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize undesired contacts as the number of violations that are above a threshold.""" # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # check if contact force is above threshold net_contact_forces = contact_sensor.data.net_forces_w_history is_contact = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold # sum over contacts for each environment return torch.sum(is_contact, dim=1) def contact_forces(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize contact forces as the amount of violations of the net contact force.""" # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] net_contact_forces = contact_sensor.data.net_forces_w_history # compute the violation violation = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] - threshold # compute the penalty return torch.sum(violation.clip(min=0.0), dim=1) """ Velocity-tracking rewards. """ def track_lin_vel_xy_exp( env: RLTaskEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Reward tracking of linear velocity commands (xy axes) using exponential kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] # compute the error lin_vel_error = torch.sum( torch.square(env.command_manager.get_command(command_name)[:, :2] - asset.data.root_lin_vel_b[:, :2]), dim=1, ) return torch.exp(-lin_vel_error / std**2) def track_ang_vel_z_exp( env: RLTaskEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Reward tracking of angular velocity commands (yaw) using exponential kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] # compute the error ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_b[:, 2]) return torch.exp(-ang_vel_error / std**2)
12,477
Python
40.732441
130
0.70666
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/events.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to enable different events. Events include anything related to altering the simulation state. This includes changing the physics materials, applying external forces, and resetting the state of the asset. The functions can be passed to the :class:`omni.isaac.orbit.managers.EventTermCfg` object to enable the event introduced by the function. """ from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation, RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers.manager_base import ManagerTermBase from omni.isaac.orbit.managers.manager_term_cfg import EventTermCfg from omni.isaac.orbit.terrains import TerrainImporter from omni.isaac.orbit.utils.math import quat_from_euler_xyz, random_orientation, sample_uniform if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv def randomize_rigid_body_material( env: BaseEnv, env_ids: torch.Tensor | None, static_friction_range: tuple[float, float], dynamic_friction_range: tuple[float, float], restitution_range: tuple[float, float], num_buckets: int, asset_cfg: SceneEntityCfg, ): """Randomize the physics materials on all geometries of the asset. This function creates a set of physics materials with random static friction, dynamic friction, and restitution values. The number of materials is specified by ``num_buckets``. The materials are generated by sampling uniform random values from the given ranges. The material properties are then assigned to the geometries of the asset. The assignment is done by creating a random integer tensor of shape (num_instances, max_num_shapes) where ``num_instances`` is the number of assets spawned and ``max_num_shapes`` is the maximum number of shapes in the asset (over all bodies). The integer values are used as indices to select the material properties from the material buckets. .. attention:: This function uses CPU tensors to assign the material properties. It is recommended to use this function only during the initialization of the environment. Otherwise, it may lead to a significant performance overhead. .. note:: PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this limit, the simulation will crash. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] num_envs = env.scene.num_envs # resolve environment ids if env_ids is None: env_ids = torch.arange(num_envs, device="cpu") # sample material properties from the given ranges material_buckets = torch.zeros(num_buckets, 3) material_buckets[:, 0].uniform_(*static_friction_range) material_buckets[:, 1].uniform_(*dynamic_friction_range) material_buckets[:, 2].uniform_(*restitution_range) # create random material assignments based on the total number of shapes: num_assets x num_shapes # note: not optimal since it creates assignments for all the shapes but only a subset is used in the body indices case. 
material_ids = torch.randint(0, num_buckets, (len(env_ids), asset.root_physx_view.max_shapes)) if asset_cfg.body_ids == slice(None): # get the current materials of the bodies materials = asset.root_physx_view.get_material_properties() # assign the new materials # material ids are of shape: num_env_ids x num_shapes # material_buckets are of shape: num_buckets x 3 materials[env_ids] = material_buckets[material_ids] # set the material properties into the physics simulation asset.root_physx_view.set_material_properties(materials, env_ids) elif isinstance(asset, Articulation): # obtain number of shapes per body (needed for indexing the material properties correctly) # note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes # per body. We use the physics simulation view to obtain the number of shapes per body. num_shapes_per_body = [] for link_path in asset.root_physx_view.link_paths[0]: link_physx_view = asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore num_shapes_per_body.append(link_physx_view.max_shapes) # get the current materials of the bodies materials = asset.root_physx_view.get_material_properties() # sample material properties from the given ranges for body_id in asset_cfg.body_ids: # start index of shape start_idx = sum(num_shapes_per_body[:body_id]) # end index of shape end_idx = start_idx + num_shapes_per_body[body_id] # assign the new materials # material ids are of shape: num_env_ids x num_shapes # material_buckets are of shape: num_buckets x 3 materials[env_ids, start_idx:end_idx] = material_buckets[material_ids[:, start_idx:end_idx]] # set the material properties into the physics simulation asset.root_physx_view.set_material_properties(materials, env_ids) else: raise ValueError( f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{asset_cfg.name}'" f" with type: '{type(asset)}' and body_ids: '{asset_cfg.body_ids}'." ) def add_body_mass( env: BaseEnv, env_ids: torch.Tensor | None, mass_range: tuple[float, float], asset_cfg: SceneEntityCfg ): """Randomize the mass of the bodies by adding a random value sampled from the given range. .. tip:: This function uses CPU tensors to assign the material properties. It is recommended to use this function only during the initialization of the environment. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] num_envs = env.scene.num_envs # resolve environment ids if env_ids is None: env_ids = torch.arange(num_envs, device="cpu") # resolve body indices if asset_cfg.body_ids == slice(None): body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu") else: body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu") # get the current masses of the bodies (num_assets, num_bodies) masses = asset.root_physx_view.get_masses() # note: we modify the masses in-place for all environments # however, the setter takes care that only the masses of the specified environments are modified masses[:, body_ids] += sample_uniform(*mass_range, (masses.shape[0], len(body_ids)), device=masses.device) # set the mass into the physics simulation asset.root_physx_view.set_masses(masses, env_ids) def apply_external_force_torque( env: BaseEnv, env_ids: torch.Tensor, force_range: tuple[float, float], torque_range: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Randomize the external forces and torques applied to the bodies. 
This function creates a set of random forces and torques sampled from the given ranges. The number of forces and torques is equal to the number of bodies times the number of environments. The forces and torques are applied to the bodies by calling ``asset.set_external_force_and_torque``. The forces and torques are only applied when ``asset.write_data_to_sim()`` is called in the environment. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] num_envs = env.scene.num_envs # resolve environment ids if env_ids is None: env_ids = torch.arange(num_envs) # resolve number of bodies num_bodies = len(asset_cfg.body_ids) if isinstance(asset_cfg.body_ids, list) else asset.num_bodies # sample random forces and torques size = (len(env_ids), num_bodies, 3) forces = sample_uniform(*force_range, size, asset.device) torques = sample_uniform(*torque_range, size, asset.device) # set the forces and torques into the buffers # note: these are only applied when you call: `asset.write_data_to_sim()` asset.set_external_force_and_torque(forces, torques, env_ids=env_ids, body_ids=asset_cfg.body_ids) def push_by_setting_velocity( env: BaseEnv, env_ids: torch.Tensor, velocity_range: dict[str, tuple[float, float]], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Push the asset by setting the root velocity to a random value within the given ranges. This creates an effect similar to pushing the asset with a random impulse that changes the asset's velocity. It samples the root velocity from the given ranges and sets the velocity into the physics simulation. The function takes a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``. If the dictionary does not contain a key, the velocity is set to zero for that axis. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # velocities vel_w = asset.data.root_vel_w[env_ids] # sample random velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) vel_w[:] = sample_uniform(ranges[:, 0], ranges[:, 1], vel_w.shape, device=asset.device) # set the velocities into the physics simulation asset.write_root_velocity_to_sim(vel_w, env_ids=env_ids) def reset_root_state_uniform( env: BaseEnv, env_ids: torch.Tensor, pose_range: dict[str, tuple[float, float]], velocity_range: dict[str, tuple[float, float]], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the asset root state to a random position and velocity uniformly within the given ranges. This function randomizes the root position and velocity of the asset. * It samples the root position from the given ranges and adds them to the default root position, before setting them into the physics simulation. * It samples the root orientation from the given ranges and sets them into the physics simulation. * It samples the root velocity from the given ranges and sets them into the physics simulation. The function takes a dictionary of position and velocity ranges for each axis and rotation. The keys of the dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``. If the dictionary does not contain a key, the position or velocity is set to zero for that axis. 
""" # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # get default root state root_states = asset.data.default_root_state[env_ids].clone() # poses range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] orientations = quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) # velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) velocities = root_states[:, 7:13] + rand_samples # set into the physics simulation asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) def reset_root_state_with_random_orientation( env: BaseEnv, env_ids: torch.Tensor, pose_range: dict[str, tuple[float, float]], velocity_range: dict[str, tuple[float, float]], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the asset root position and velocities sampled randomly within the given ranges and the asset root orientation sampled randomly from the SO(3). This function randomizes the root position and velocity of the asset. * It samples the root position from the given ranges and adds them to the default root position, before setting them into the physics simulation. * It samples the root orientation uniformly from the SO(3) and sets them into the physics simulation. * It samples the root velocity from the given ranges and sets them into the physics simulation. The function takes a dictionary of position and velocity ranges for each axis and rotation: * :attr:`pose_range` - a dictionary of position ranges for each axis. The keys of the dictionary are ``x``, ``y``, and ``z``. * :attr:`velocity_range` - a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``. If the dictionary does not contain a particular key, the position is set to zero for that axis. 
""" # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # get default root state root_states = asset.data.default_root_state[env_ids].clone() # poses range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device) positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples orientations = random_orientation(len(env_ids), device=asset.device) # velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) velocities = root_states[:, 7:13] + rand_samples # set into the physics simulation asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) def reset_robot_root_from_terrain( env: BaseEnv, env_ids: torch.Tensor, pose_range: dict[str, tuple[float, float]], velocity_range: dict[str, tuple[float, float]], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the robot root state by sampling a random valid pose from the terrain. This function samples a random valid pose(based on flat patches) from the terrain and sets the root state of the robot to this pose. The function also samples random velocities from the given ranges and sets them into the physics simulation. Note: The function expects the terrain to have valid flat patches under the key "init_pos". The flat patches are used to sample the random pose for the robot. Raises: ValueError: If the terrain does not have valid flat patches under the key "init_pos". """ # access the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] terrain: TerrainImporter = env.scene.terrain # obtain all flat patches corresponding to the valid poses valid_poses: torch.Tensor = terrain.flat_patches.get("init_pos") if valid_poses is None: raise ValueError( "The event term 'reset_robot_root_from_terrain' requires valid flat patches under 'init_pos'." 
f" Found: {list(terrain.flat_patches.keys())}" ) # sample random valid poses ids = torch.randint(0, valid_poses.shape[2], size=(len(env_ids),), device=env.device) positions = valid_poses[terrain.terrain_levels[env_ids], terrain.terrain_types[env_ids], ids] positions += asset.data.default_root_state[env_ids, :3] # sample random orientations range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device) # convert to quaternions orientations = quat_from_euler_xyz(rand_samples[:, 0], rand_samples[:, 1], rand_samples[:, 2]) # sample random velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) velocities = asset.data.default_root_state[:, 7:13] + rand_samples # set into the physics simulation asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) def reset_joints_by_scale( env: BaseEnv, env_ids: torch.Tensor, position_range: tuple[float, float], velocity_range: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the robot joints by scaling the default position and velocity by the given ranges. This function samples random values from the given ranges and scales the default joint positions and velocities by these values. The scaled values are then set into the physics simulation. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # get default joint state joint_pos = asset.data.default_joint_pos[env_ids].clone() joint_vel = asset.data.default_joint_vel[env_ids].clone() # scale these values randomly joint_pos *= sample_uniform(*position_range, joint_pos.shape, joint_pos.device) joint_vel *= sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device) # clamp joint pos to limits joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids] joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) # set into the physics simulation asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) def reset_joints_by_offset( env: BaseEnv, env_ids: torch.Tensor, position_range: tuple[float, float], velocity_range: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the robot joints with offsets around the default position and velocity by the given ranges. This function samples random values from the given ranges and biases the default joint positions and velocities by these values. The biased values are then set into the physics simulation. 
""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # get default joint state joint_pos = asset.data.default_joint_pos[env_ids].clone() joint_vel = asset.data.default_joint_vel[env_ids].clone() # bias these values randomly joint_pos += sample_uniform(*position_range, joint_pos.shape, joint_pos.device) joint_vel += sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device) # clamp joint pos to limits joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids] joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) # set into the physics simulation asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) class reset_joints_within_range(ManagerTermBase): """Reset an articulation's joints to a random position in the given ranges. This function samples random values for the joint position and velocities from the given ranges. The values are then set into the physics simulation. The parameters to the function are: * :attr:`position_range` - a dictionary of position ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`velocity_range` - a dictionary of velocity ranges for each joint. The keys of the dictionary are the joint names (or regular expressions) of the asset. * :attr:`use_default_offset` - a boolean flag to indicate if the ranges are offset by the default joint state. Defaults to False. * :attr:`asset_cfg` - the configuration of the asset to reset. Defaults to the entity named "robot" in the scene. The dictionary values are a tuple of the form ``(min, max)``, where ``min`` and ``max`` are the minimum and maximum values. If the dictionary does not contain a key, the joint position or joint velocity is set to the default value for that joint. If the ``min`` or the ``max`` value is ``None``, the joint limits are used instead. """ def __init__(self, cfg: EventTermCfg, env: BaseEnv): # initialize the base class super().__init__(cfg, env) # check if the cfg has the required parameters if "position_range" not in cfg.params or "velocity_range" not in cfg.params: raise ValueError( "The term 'reset_joints_within_range' requires parameters: 'position_range' and 'velocity_range'." f" Received: {list(cfg.params.keys())}." 
) # parse the parameters asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot")) use_default_offset = cfg.params.get("use_default_offset", False) # extract the used quantities (to enable type-hinting) self._asset: Articulation = env.scene[asset_cfg.name] default_joint_pos = self._asset.data.default_joint_pos[0] default_joint_vel = self._asset.data.default_joint_vel[0] # create buffers to store the joint position and velocity ranges self._pos_ranges = self._asset.data.soft_joint_pos_limits[0].clone() self._vel_ranges = torch.stack( [-self._asset.data.soft_joint_vel_limits[0], self._asset.data.soft_joint_vel_limits[0]], dim=1 ) # parse joint position ranges pos_joint_ids = [] for joint_name, joint_range in cfg.params["position_range"].items(): # find the joint ids joint_ids = self._asset.find_joints(joint_name)[0] pos_joint_ids.extend(joint_ids) # set the joint position ranges based on the given values if joint_range[0] is not None: self._pos_ranges[joint_ids, 0] = joint_range[0] + use_default_offset * default_joint_pos[joint_ids] if joint_range[1] is not None: self._pos_ranges[joint_ids, 1] = joint_range[1] + use_default_offset * default_joint_pos[joint_ids] # store the joint pos ids (used later to sample the joint positions) self._pos_joint_ids = torch.tensor(pos_joint_ids, device=self._pos_ranges.device) # clamp sampling range to the joint position limits joint_pos_limits = self._asset.data.soft_joint_pos_limits[0] self._pos_ranges = self._pos_ranges.clamp(min=joint_pos_limits[:, 0], max=joint_pos_limits[:, 1]) self._pos_ranges = self._pos_ranges[self._pos_joint_ids] # parse joint velocity ranges vel_joint_ids = [] for joint_name, joint_range in cfg.params["velocity_range"].items(): # find the joint ids joint_ids = self._asset.find_joints(joint_name)[0] vel_joint_ids.extend(joint_ids) # set the joint position ranges based on the given values if joint_range[0] is not None: self._vel_ranges[joint_ids, 0] = joint_range[0] + use_default_offset * default_joint_vel[joint_ids] if joint_range[1] is not None: self._vel_ranges[joint_ids, 1] = joint_range[1] + use_default_offset * default_joint_vel[joint_ids] # store the joint vel ids (used later to sample the joint positions) self._vel_joint_ids = torch.tensor(vel_joint_ids, device=self._vel_ranges.device) # clamp sampling range to the joint velocity limits joint_vel_limits = self._asset.data.soft_joint_vel_limits[0] self._vel_ranges = self._vel_ranges.clamp(min=-joint_vel_limits[:, None], max=joint_vel_limits[:, None]) self._vel_ranges = self._vel_ranges[self._vel_joint_ids] def __call__( self, env: BaseEnv, env_ids: torch.Tensor, position_range: dict[str, tuple[float | None, float | None]], velocity_range: dict[str, tuple[float | None, float | None]], use_default_offset: bool = False, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): # get default joint state joint_pos = self._asset.data.default_joint_pos[env_ids].clone() joint_vel = self._asset.data.default_joint_vel[env_ids].clone() # sample random joint positions for each joint if len(self._pos_joint_ids) > 0: joint_pos_shape = (len(env_ids), len(self._pos_joint_ids)) joint_pos[:, self._pos_joint_ids] = sample_uniform( self._pos_ranges[:, 0], self._pos_ranges[:, 1], joint_pos_shape, device=joint_pos.device ) # sample random joint velocities for each joint if len(self._vel_joint_ids) > 0: joint_vel_shape = (len(env_ids), len(self._vel_joint_ids)) joint_vel[:, self._vel_joint_ids] = sample_uniform( self._vel_ranges[:, 0], self._vel_ranges[:, 1], joint_vel_shape, 
device=joint_vel.device ) # set into the physics simulation self._asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) def reset_scene_to_default(env: BaseEnv, env_ids: torch.Tensor): """Reset the scene to the default state specified in the scene configuration.""" # rigid bodies for rigid_object in env.scene.rigid_objects.values(): # obtain default and deal with the offset for env origins default_root_state = rigid_object.data.default_root_state[env_ids].clone() default_root_state[:, 0:3] += env.scene.env_origins[env_ids] # set into the physics simulation rigid_object.write_root_state_to_sim(default_root_state, env_ids=env_ids) # articulations for articulation_asset in env.scene.articulations.values(): # obtain default and deal with the offset for env origins default_root_state = articulation_asset.data.default_root_state[env_ids].clone() default_root_state[:, 0:3] += env.scene.env_origins[env_ids] # set into the physics simulation articulation_asset.write_root_state_to_sim(default_root_state, env_ids=env_ids) # obtain default joint positions default_joint_pos = articulation_asset.data.default_joint_pos[env_ids].clone() default_joint_vel = articulation_asset.data.default_joint_vel[env_ids].clone() # set into the physics simulation articulation_asset.write_joint_state_to_sim(default_joint_pos, default_joint_vel, env_ids=env_ids)
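

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the original module):
# event terms are registered through ``EventTermCfg``. The mode string "reset"
# and the sampling ranges below are placeholder values chosen only for
# illustration.
def _example_reset_event_cfg() -> EventTermCfg:
    return EventTermCfg(
        func=reset_root_state_uniform,
        mode="reset",  # assumed event mode; applied whenever the environments are reset
        params={
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
        },
    )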
27,752
Python
48.470588
123
0.682077
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/terminations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to activate certain terminations. The functions can be passed to the :class:`omni.isaac.orbit.managers.TerminationTermCfg` object to enable the termination introduced by the function. """ from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation, RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.sensors import ContactSensor if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv from omni.isaac.orbit.managers.command_manager import CommandTerm """ MDP terminations. """ def time_out(env: RLTaskEnv) -> torch.Tensor: """Terminate the episode when the episode length exceeds the maximum episode length.""" return env.episode_length_buf >= env.max_episode_length def command_resample(env: RLTaskEnv, command_name: str, num_resamples: int = 1) -> torch.Tensor: """Terminate the episode based on the total number of times commands have been re-sampled. This makes the maximum episode length fluid in nature as it depends on how the commands are sampled. It is useful in situations where delayed rewards are used :cite:`rudin2022advanced`. """ command: CommandTerm = env.command_manager.get_term(command_name) return torch.logical_and((command.time_left <= env.step_dt), (command.command_counter == num_resamples)) """ Root terminations. """ def bad_orientation( env: RLTaskEnv, limit_angle: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Terminate when the asset's orientation is too far from the desired orientation limits. This is computed by checking the angle between the projected gravity vector and the z-axis. """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return torch.acos(-asset.data.projected_gravity_b[:, 2]).abs() > limit_angle def base_height( env: RLTaskEnv, minimum_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Terminate when the asset's height is below the minimum height. Note: This is currently only supported for flat terrains, i.e. the minimum height is in the world frame. """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_pos_w[:, 2] < minimum_height """ Joint terminations. """ def joint_pos_limit(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Terminate when the asset's joint positions are outside of the soft joint limits.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute any violations out_of_upper_limits = torch.any(asset.data.joint_pos > asset.data.soft_joint_pos_limits[..., 1], dim=1) out_of_lower_limits = torch.any(asset.data.joint_pos < asset.data.soft_joint_pos_limits[..., 0], dim=1) return torch.logical_or(out_of_upper_limits, out_of_lower_limits) def joint_pos_manual_limit( env: RLTaskEnv, bounds: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Terminate when the asset's joint positions are outside of the configured bounds. Note: This function is similar to :func:`joint_pos_limit` but allows the user to specify the bounds manually. 
""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] if asset_cfg.joint_ids is None: asset_cfg.joint_ids = slice(None) # compute any violations out_of_upper_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] > bounds[1], dim=1) out_of_lower_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] < bounds[0], dim=1) return torch.logical_or(out_of_upper_limits, out_of_lower_limits) def joint_vel_limit(env: RLTaskEnv, max_velocity, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Terminate when the asset's joint velocities are outside of the soft joint limits.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # TODO read max velocities per joint from robot return torch.any(torch.abs(asset.data.joint_vel) > max_velocity, dim=1) def joint_torque_limit(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Terminate when torque applied on the asset's joints are are outside of the soft joint limits.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return torch.any( torch.isclose(asset.data.computed_torques, asset.data.applied_torque), dim=1, ) """ Contact sensor. """ def illegal_contact(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Terminate when the contact force on the sensor exceeds the force threshold.""" # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] net_contact_forces = contact_sensor.data.net_forces_w_history # check if any contact force exceeds the threshold return torch.any( torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold, dim=1 )
5,608
Python
39.064285
119
0.720578
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to create observation terms. The functions can be passed to the :class:`omni.isaac.orbit.managers.ObservationTermCfg` object to enable the observation introduced by the function. """ from __future__ import annotations import torch from typing import TYPE_CHECKING import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets import Articulation, RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.sensors import RayCaster if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv, RLTaskEnv """ Root state. """ def base_pos_z(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Root height in the simulation world frame.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return asset.data.root_pos_w[:, 2].unsqueeze(-1) def base_lin_vel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Root linear velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_lin_vel_b def base_ang_vel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Root angular velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_ang_vel_b def projected_gravity(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Gravity projection on the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.projected_gravity_b def root_pos_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Asset root position in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_pos_w - env.scene.env_origins def root_quat_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Asset root orientation in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_quat_w def root_lin_vel_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Asset root linear velocity in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_lin_vel_w def root_ang_vel_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Asset root angular velocity in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_ang_vel_w """ Joint state. """ def joint_pos_rel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """The joint positions of the asset w.r.t. the default joint positions. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their positions returned. 
""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids] def joint_pos_norm(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """The joint positions of the asset normalized with the asset's joint limits. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their normalized positions returned. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return math_utils.scale_transform( asset.data.joint_pos[:, asset_cfg.joint_ids], asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0], asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1], ) def joint_vel_rel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")): """The joint velocities of the asset w.r.t. the default joint velocities. NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their velocities returned. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return asset.data.joint_vel[:, asset_cfg.joint_ids] - asset.data.default_joint_vel[:, asset_cfg.joint_ids] """ Sensors. """ def height_scan(env: BaseEnv, sensor_cfg: SceneEntityCfg, offset: float = 0.5) -> torch.Tensor: """Height scan from the given sensor w.r.t. the sensor's frame. The provided offset (Defaults to 0.5) is subtracted from the returned values. """ # extract the used quantities (to enable type-hinting) sensor: RayCaster = env.scene.sensors[sensor_cfg.name] # height scan: height = sensor_height - hit_point_z - offset return sensor.data.pos_w[:, 2].unsqueeze(1) - sensor.data.ray_hits_w[..., 2] - offset def body_incoming_wrench(env: BaseEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Incoming spatial wrench on bodies of an articulation in the simulation world frame. This is the 6-D wrench (force and torque) applied to the body link by the incoming joint force. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # obtain the link incoming forces in world frame link_incoming_forces = asset.root_physx_view.get_link_incoming_joint_force()[:, asset_cfg.body_ids] return link_incoming_forces.view(env.num_envs, -1) """ Actions. """ def last_action(env: BaseEnv, action_name: str | None = None) -> torch.Tensor: """The last input action to the environment. The name of the action term for which the action is required. If None, the entire action tensor is returned. """ if action_name is None: return env.action_manager.action else: return env.action_manager.get_term(action_name).raw_actions """ Commands. """ def generated_commands(env: RLTaskEnv, command_name: str) -> torch.Tensor: """The generated command from command term in the command manager with the given name.""" return env.command_manager.get_command(command_name)
6,773
Python
37.05618
114
0.713569
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/task_space_actions.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING import carb import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets.articulation import Articulation from omni.isaac.orbit.controllers.differential_ik import DifferentialIKController from omni.isaac.orbit.managers.action_manager import ActionTerm if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from . import actions_cfg class DifferentialInverseKinematicsAction(ActionTerm): r"""Inverse Kinematics action term. This action term performs pre-processing of the raw actions using scaling transformation. .. math:: \text{action} = \text{scaling} \times \text{input action} \text{joint position} = J^{-} \times \text{action} where :math:`\text{scaling}` is the scaling applied to the input action, and :math:`\text{input action}` is the input action from the user, :math:`J` is the Jacobian over the articulation's actuated joints, and \text{joint position} is the desired joint position command for the articulation's joints. """ cfg: actions_cfg.DifferentialInverseKinematicsActionCfg """The configuration of the action term.""" _asset: Articulation """The articulation asset on which the action term is applied.""" _scale: torch.Tensor """The scaling factor applied to the input action. Shape is (1, action_dim).""" def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # resolve the joints over which the action term is applied self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names) self._num_joints = len(self._joint_ids) # parse the body index body_ids, body_names = self._asset.find_bodies(self.cfg.body_name) if len(body_ids) != 1: raise ValueError( f"Expected one match for the body name: {self.cfg.body_name}. Found {len(body_ids)}: {body_names}." 
) # save only the first body index self._body_idx = body_ids[0] self._body_name = body_names[0] # check if articulation is fixed-base # if fixed-base then the jacobian for the base is not computed # this means that number of bodies is one less than the articulation's number of bodies if self._asset.is_fixed_base: self._jacobi_body_idx = self._body_idx - 1 else: self._jacobi_body_idx = self._body_idx # log info for debugging carb.log_info( f"Resolved joint names for the action term {self.__class__.__name__}:" f" {self._joint_names} [{self._joint_ids}]" ) carb.log_info( f"Resolved body name for the action term {self.__class__.__name__}: {self._body_name} [{self._body_idx}]" ) # Avoid indexing across all joints for efficiency if self._num_joints == self._asset.num_joints: self._joint_ids = slice(None) # create the differential IK controller self._ik_controller = DifferentialIKController( cfg=self.cfg.controller, num_envs=self.num_envs, device=self.device ) # create tensors for raw and processed actions self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device) self._processed_actions = torch.zeros_like(self.raw_actions) # save the scale as tensors self._scale = torch.zeros((self.num_envs, self.action_dim), device=self.device) self._scale[:] = torch.tensor(self.cfg.scale, device=self.device) # convert the fixed offsets to torch tensors of batched shape if self.cfg.body_offset is not None: self._offset_pos = torch.tensor(self.cfg.body_offset.pos, device=self.device).repeat(self.num_envs, 1) self._offset_rot = torch.tensor(self.cfg.body_offset.rot, device=self.device).repeat(self.num_envs, 1) else: self._offset_pos, self._offset_rot = None, None """ Properties. """ @property def action_dim(self) -> int: return self._ik_controller.action_dim @property def raw_actions(self) -> torch.Tensor: return self._raw_actions @property def processed_actions(self) -> torch.Tensor: return self._processed_actions """ Operations. """ def process_actions(self, actions: torch.Tensor): # store the raw actions self._raw_actions[:] = actions self._processed_actions[:] = self.raw_actions * self._scale # obtain quantities from simulation ee_pos_curr, ee_quat_curr = self._compute_frame_pose() # set command into controller self._ik_controller.set_command(self._processed_actions, ee_pos_curr, ee_quat_curr) def apply_actions(self): # obtain quantities from simulation ee_pos_curr, ee_quat_curr = self._compute_frame_pose() joint_pos = self._asset.data.joint_pos[:, self._joint_ids] # compute the delta in joint-space if ee_quat_curr.norm() != 0: jacobian = self._compute_frame_jacobian() joint_pos_des = self._ik_controller.compute(ee_pos_curr, ee_quat_curr, jacobian, joint_pos) else: joint_pos_des = joint_pos.clone() # set the joint position command self._asset.set_joint_position_target(joint_pos_des, self._joint_ids) """ Helper functions. """ def _compute_frame_pose(self) -> tuple[torch.Tensor, torch.Tensor]: """Computes the pose of the target frame in the root frame. Returns: A tuple of the body's position and orientation in the root frame. 
""" # obtain quantities from simulation ee_pose_w = self._asset.data.body_state_w[:, self._body_idx, :7] root_pose_w = self._asset.data.root_state_w[:, :7] # compute the pose of the body in the root frame ee_pose_b, ee_quat_b = math_utils.subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] ) # account for the offset if self.cfg.body_offset is not None: ee_pose_b, ee_quat_b = math_utils.combine_frame_transforms( ee_pose_b, ee_quat_b, self._offset_pos, self._offset_rot ) return ee_pose_b, ee_quat_b def _compute_frame_jacobian(self): """Computes the geometric Jacobian of the target frame in the root frame. This function accounts for the target frame offset and applies the necessary transformations to obtain the right Jacobian from the parent body Jacobian. """ # read the parent jacobian jacobian = self._asset.root_physx_view.get_jacobians()[:, self._jacobi_body_idx, :, self._joint_ids] # account for the offset if self.cfg.body_offset is not None: # Modify the jacobian to account for the offset # -- translational part # v_link = v_ee + w_ee x r_link_ee = v_J_ee * q + w_J_ee * q x r_link_ee # = (v_J_ee + w_J_ee x r_link_ee ) * q # = (v_J_ee - r_link_ee_[x] @ w_J_ee) * q jacobian[:, 0:3, :] += torch.bmm(-math_utils.skew_symmetric_matrix(self._offset_pos), jacobian[:, 3:, :]) # -- rotational part # w_link = R_link_ee @ w_ee jacobian[:, 3:, :] = torch.bmm(math_utils.matrix_from_quat(self._offset_rot), jacobian[:, 3:, :]) return jacobian
7,767
Python
40.100529
117
0.627527
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/non_holonomic_actions.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

import carb

from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.managers.action_manager import ActionTerm
from omni.isaac.orbit.utils.math import euler_xyz_from_quat

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import BaseEnv

    from . import actions_cfg


class NonHolonomicAction(ActionTerm):
    r"""Non-holonomic action that maps a two dimensional action to the velocity of the robot in
    the x, y and yaw directions.

    This action term helps model a skid-steer robot base. The action is a 2D vector which comprises
    the forward velocity :math:`v_{B,x}` and the turning rate :math:`\omega_{B,z}` in the base frame.
    Using the current base orientation, the commands are transformed into dummy joint velocity targets as:

    .. math::

        \dot{q}_{0, des} &= v_{B,x} \cos(\theta) \\
        \dot{q}_{1, des} &= v_{B,x} \sin(\theta) \\
        \dot{q}_{2, des} &= \omega_{B,z}

    where :math:`\theta` is the yaw of the 2-D base. Since the base is simulated as a dummy joint, the yaw is directly
    the value of the revolute joint along z, i.e., :math:`q_2 = \theta`.

    .. note::
        The current implementation assumes that the base is simulated with three dummy joints (prismatic joints along
        x and y, and revolute joint along z). This is because it is easier to consider the mobile base as a floating
        link controlled by three dummy joints, in comparison to simulating wheels which is at times tricky because
        of friction settings.

        However, the action term can be extended to support other base configurations as well.

    .. tip::
        For velocity control of the base with dummy mechanism, we recommend setting high damping gains to the joints.
        This ensures that the base remains unperturbed from external disturbances, such as an arm mounted on the base.

    """

    cfg: actions_cfg.NonHolonomicActionCfg
    """The configuration of the action term."""
    _asset: Articulation
    """The articulation asset on which the action term is applied."""
    _scale: torch.Tensor
    """The scaling factor applied to the input action. Shape is (1, 2)."""
    _offset: torch.Tensor
    """The offset applied to the input action.
Shape is (1, 2).""" def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # parse the joint information # -- x joint x_joint_id, x_joint_name = self._asset.find_joints(self.cfg.x_joint_name) if len(x_joint_id) != 1: raise ValueError( f"Expected a single joint match for the x joint name: {self.cfg.x_joint_name}, got {len(x_joint_id)}" ) # -- y joint y_joint_id, y_joint_name = self._asset.find_joints(self.cfg.y_joint_name) if len(y_joint_id) != 1: raise ValueError(f"Found more than one joint match for the y joint name: {self.cfg.y_joint_name}") # -- yaw joint yaw_joint_id, yaw_joint_name = self._asset.find_joints(self.cfg.yaw_joint_name) if len(yaw_joint_id) != 1: raise ValueError(f"Found more than one joint match for the yaw joint name: {self.cfg.yaw_joint_name}") # parse the body index self._body_idx, self._body_name = self._asset.find_bodies(self.cfg.body_name) if len(self._body_idx) != 1: raise ValueError(f"Found more than one body match for the body name: {self.cfg.body_name}") # process into a list of joint ids self._joint_ids = [x_joint_id[0], y_joint_id[0], yaw_joint_id[0]] self._joint_names = [x_joint_name[0], y_joint_name[0], yaw_joint_name[0]] # log info for debugging carb.log_info( f"Resolved joint names for the action term {self.__class__.__name__}:" f" {self._joint_names} [{self._joint_ids}]" ) carb.log_info( f"Resolved body name for the action term {self.__class__.__name__}: {self._body_name} [{self._body_idx}]" ) # create tensors for raw and processed actions self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device) self._processed_actions = torch.zeros_like(self.raw_actions) self._joint_vel_command = torch.zeros(self.num_envs, 3, device=self.device) # save the scale and offset as tensors self._scale = torch.tensor(self.cfg.scale, device=self.device).unsqueeze(0) self._offset = torch.tensor(self.cfg.offset, device=self.device).unsqueeze(0) """ Properties. """ @property def action_dim(self) -> int: return 2 @property def raw_actions(self) -> torch.Tensor: return self._raw_actions @property def processed_actions(self) -> torch.Tensor: return self._processed_actions """ Operations. """ def process_actions(self, actions): # store the raw actions self._raw_actions[:] = actions self._processed_actions = self.raw_actions * self._scale + self._offset def apply_actions(self): # obtain current heading quat_w = self._asset.data.body_quat_w[:, self._body_idx] yaw_w = euler_xyz_from_quat(quat_w)[2] # compute joint velocities targets self._joint_vel_command[:, 0] = torch.cos(yaw_w) * self.processed_actions[:, 0] # x self._joint_vel_command[:, 1] = torch.sin(yaw_w) * self.processed_actions[:, 0] # y self._joint_vel_command[:, 2] = self.processed_actions[:, 1] # yaw # set the joint velocity targets self._asset.set_joint_velocity_target(self._joint_vel_command, joint_ids=self._joint_ids)
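# Example (sketch): the mapping from the class docstring written out with plain torch. With a
# forward command of 1.0 m/s, a turn-rate command of 0.5 rad/s and a base yaw of pi/2, the dummy
# joint velocity targets become approximately (0.0, 1.0, 0.5). All numbers are illustrative.
#
#   import torch
#
#   v, w = 1.0, 0.5
#   yaw = torch.tensor(torch.pi / 2)
#   q_dot = torch.stack([v * torch.cos(yaw), v * torch.sin(yaw), torch.tensor(w)])
#   # q_dot -> tensor([~0.0000, 1.0000, 0.5000]): x, y and yaw dummy-joint velocity targets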
5,929
Python
40.760563
119
0.644291
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/joint_actions.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import carb import omni.isaac.orbit.utils.string as string_utils from omni.isaac.orbit.assets.articulation import Articulation from omni.isaac.orbit.managers.action_manager import ActionTerm if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from . import actions_cfg class JointAction(ActionTerm): r"""Base class for joint actions. This action term performs pre-processing of the raw actions using affine transformations (scale and offset). These transformations can be configured to be applied to a subset of the articulation's joints. Mathematically, the action term is defined as: .. math:: \text{action} = \text{offset} + \text{scaling} \times \text{input action} where :math:`\text{action}` is the action that is sent to the articulation's actuated joints, :math:`\text{offset}` is the offset applied to the input action, :math:`\text{scaling}` is the scaling applied to the input action, and :math:`\text{input action}` is the input action from the user. Based on above, this kind of action transformation ensures that the input and output actions are in the same units and dimensions. The child classes of this action term can then map the output action to a specific desired command of the articulation's joints (e.g. position, velocity, etc.). """ cfg: actions_cfg.JointActionCfg """The configuration of the action term.""" _asset: Articulation """The articulation asset on which the action term is applied.""" _scale: torch.Tensor | float """The scaling factor applied to the input action.""" _offset: torch.Tensor | float """The offset applied to the input action.""" def __init__(self, cfg: actions_cfg.JointActionCfg, env: BaseEnv) -> None: # initialize the action term super().__init__(cfg, env) # resolve the joints over which the action term is applied self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names) self._num_joints = len(self._joint_ids) # log the resolved joint names for debugging carb.log_info( f"Resolved joint names for the action term {self.__class__.__name__}:" f" {self._joint_names} [{self._joint_ids}]" ) # Avoid indexing across all joints for efficiency if self._num_joints == self._asset.num_joints: self._joint_ids = slice(None) # create tensors for raw and processed actions self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device) self._processed_actions = torch.zeros_like(self.raw_actions) # parse scale if isinstance(cfg.scale, (float, int)): self._scale = float(cfg.scale) elif isinstance(cfg.scale, dict): self._scale = torch.ones(self.num_envs, self.action_dim, device=self.device) # resolve the dictionary config index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.scale, self._joint_names) self._scale[:, index_list] = torch.tensor(value_list, device=self.device) else: raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. 
Supported types are float and dict.") # parse offset if isinstance(cfg.offset, (float, int)): self._offset = float(cfg.offset) elif isinstance(cfg.offset, dict): self._offset = torch.zeros_like(self._raw_actions) # resolve the dictionary config index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.offset, self._joint_names) self._offset[:, index_list] = torch.tensor(value_list, device=self.device) else: raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.") """ Properties. """ @property def action_dim(self) -> int: return self._num_joints @property def raw_actions(self) -> torch.Tensor: return self._raw_actions @property def processed_actions(self) -> torch.Tensor: return self._processed_actions """ Operations. """ def process_actions(self, actions: torch.Tensor): # store the raw actions self._raw_actions[:] = actions # apply the affine transformations self._processed_actions = self._raw_actions * self._scale + self._offset class JointPositionAction(JointAction): """Joint action term that applies the processed actions to the articulation's joints as position commands.""" cfg: actions_cfg.JointPositionActionCfg """The configuration of the action term.""" def __init__(self, cfg: actions_cfg.JointPositionActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # use default joint positions as offset if cfg.use_default_offset: self._offset = self._asset.data.default_joint_pos[:, self._joint_ids].clone() def apply_actions(self): # set position targets self._asset.set_joint_position_target(self.processed_actions, joint_ids=self._joint_ids) class RelativeJointPositionAction(JointAction): r"""Joint action term that applies the processed actions to the articulation's joints as relative position commands. Unlike :class:`JointPositionAction`, this action term applies the processed actions as relative position commands. This means that the processed actions are added to the current joint positions of the articulation's joints before being sent as position commands. This means that the action applied at every step is: .. math:: \text{applied action} = \text{current joint positions} + \text{processed actions} where :math:`\text{current joint positions}` are the current joint positions of the articulation's joints. """ cfg: actions_cfg.RelativeJointPositionActionCfg """The configuration of the action term.""" def __init__(self, cfg: actions_cfg.RelativeJointPositionActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # use zero offset for relative position if cfg.use_zero_offset: self._offset = 0.0 def apply_actions(self): # add current joint positions to the processed actions current_actions = self.processed_actions + self._asset.data.joint_pos[:, self._joint_ids] # set position targets self._asset.set_joint_position_target(current_actions, joint_ids=self._joint_ids) class ExponentialMovingAverageJointPositionAction(JointPositionAction): r"""Joint action term that applies the processed actions to the articulation's joints as exponential moving average position commands. Exponential moving average is a type of moving average that gives more weight to the most recent data points. This action term applies the processed actions as moving average position action commands. The moving average is computed as: .. 
math:: \text{applied action} = \text{weight} \times \text{processed actions} + (1 - \text{weight}) \times \text{previous applied action} where :math:`\text{weight}` is the weight for the moving average, :math:`\text{processed actions}` are the processed actions, and :math:`\text{previous action}` is the previous action that was applied to the articulation's joints. In the trivial case where the weight is 1.0, the action term behaves exactly like :class:`JointPositionAction`. On reset, the previous action is initialized to the current joint positions of the articulation's joints. """ cfg: actions_cfg.ExponentialMovingAverageJointPositionActionCfg """The configuration of the action term.""" def __init__(self, cfg: actions_cfg.ExponentialMovingAverageJointPositionActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # parse and save the moving average weight if isinstance(cfg.weight, float): # check that the weight is in the valid range if not 0.0 <= cfg.weight <= 1.0: raise ValueError(f"Moving average weight must be in the range [0, 1]. Got {cfg.weight}.") self._weight = cfg.weight elif isinstance(cfg.weight, dict): self._weight = torch.ones((env.num_envs, self.action_dim), device=self.device) # resolve the dictionary config index_list, names_list, value_list = string_utils.resolve_matching_names_values( cfg.weight, self._joint_names ) # check that the weights are in the valid range for name, value in zip(names_list, value_list): if not 0.0 <= value <= 1.0: raise ValueError( f"Moving average weight must be in the range [0, 1]. Got {value} for joint {name}." ) self._weight[:, index_list] = torch.tensor(value_list, device=self.device) else: raise ValueError( f"Unsupported moving average weight type: {type(cfg.weight)}. Supported types are float and dict." 
) # initialize the previous targets self._prev_applied_actions = torch.zeros_like(self.processed_actions) def reset(self, env_ids: Sequence[int] | None = None) -> None: # check if specific environment ids are provided if env_ids is None: env_ids = slice(None) # reset history to current joint positions self._prev_applied_actions[env_ids, :] = self._asset.data.joint_pos[env_ids, self._joint_ids] def apply_actions(self): # set position targets as moving average current_actions = self._weight * self.processed_actions current_actions += (1.0 - self._weight) * self._prev_applied_actions # set position targets self._asset.set_joint_position_target(current_actions, joint_ids=self._joint_ids) # update previous targets self._prev_applied_actions[:] = current_actions[:] class JointVelocityAction(JointAction): """Joint action term that applies the processed actions to the articulation's joints as velocity commands.""" cfg: actions_cfg.JointVelocityActionCfg """The configuration of the action term.""" def __init__(self, cfg: actions_cfg.JointVelocityActionCfg, env: BaseEnv): # initialize the action term super().__init__(cfg, env) # use default joint velocity as offset if cfg.use_default_offset: self._offset = self._asset.data.default_joint_vel[:, self._joint_ids].clone() def apply_actions(self): # set joint velocity targets self._asset.set_joint_velocity_target(self.processed_actions, joint_ids=self._joint_ids) class JointEffortAction(JointAction): """Joint action term that applies the processed actions to the articulation's joints as effort commands.""" cfg: actions_cfg.JointEffortActionCfg """The configuration of the action term.""" def __init__(self, cfg: actions_cfg.JointEffortActionCfg, env: BaseEnv): super().__init__(cfg, env) def apply_actions(self): # set joint effort targets self._asset.set_joint_effort_target(self.processed_actions, joint_ids=self._joint_ids)
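# Example (sketch): behaviour of the exponential moving average filter above, independent of the
# simulator. With weight 0.2 and a constant target of 1.0 starting from 0.0, the applied action
# approaches the target geometrically (0.2, 0.36, 0.488, ...).
#
#   import torch
#
#   weight, target, applied = 0.2, torch.tensor(1.0), torch.tensor(0.0)
#   for _ in range(3):
#       applied = weight * target + (1.0 - weight) * applied
#   # applied -> tensor(0.4880)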
11,659
Python
41.246377
137
0.669783
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Various action terms that can be used in the environment."""

from .actions_cfg import *
from .binary_joint_actions import *
from .joint_actions import *
from .non_holonomic_actions import *
from .task_space_actions import *
317
Python
25.499998
63
0.747634
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/actions_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.controllers import DifferentialIKControllerCfg from omni.isaac.orbit.managers.action_manager import ActionTerm, ActionTermCfg from omni.isaac.orbit.utils import configclass from . import binary_joint_actions, joint_actions, non_holonomic_actions, task_space_actions ## # Joint actions. ## @configclass class JointActionCfg(ActionTermCfg): """Configuration for the base joint action term. See :class:`JointAction` for more details. """ joint_names: list[str] = MISSING """List of joint names or regex expressions that the action will be mapped to.""" scale: float | dict[str, float] = 1.0 """Scale factor for the action (float or dict of regex expressions). Defaults to 1.0.""" offset: float | dict[str, float] = 0.0 """Offset factor for the action (float or dict of regex expressions). Defaults to 0.0.""" @configclass class JointPositionActionCfg(JointActionCfg): """Configuration for the joint position action term. See :class:`JointPositionAction` for more details. """ class_type: type[ActionTerm] = joint_actions.JointPositionAction use_default_offset: bool = True """Whether to use default joint positions configured in the articulation asset as offset. Defaults to True. If True, this flag results in overwriting the values of :attr:`offset` to the default joint positions from the articulation asset. """ @configclass class RelativeJointPositionActionCfg(JointActionCfg): """Configuration for the relative joint position action term. See :class:`RelativeJointPositionAction` for more details. """ class_type: type[ActionTerm] = joint_actions.RelativeJointPositionAction use_zero_offset: bool = True """Whether to ignore the offset defined in articulation asset. Defaults to True. If True, this flag results in overwriting the values of :attr:`offset` to zero. """ @configclass class ExponentialMovingAverageJointPositionActionCfg(JointPositionActionCfg): """Configuration for the exponential moving average joint position action term. See :class:`ExponentialMovingAverageJointPositionAction` for more details. """ class_type: type[ActionTerm] = joint_actions.ExponentialMovingAverageJointPositionAction weight: float | dict[str, float] = 1.0 """The weight for the moving average (float or dict of regex expressions). Defaults to 1.0. If set to 1.0, the processed action is applied directly without any moving average window. """ @configclass class JointVelocityActionCfg(JointActionCfg): """Configuration for the joint velocity action term. See :class:`JointVelocityAction` for more details. """ class_type: type[ActionTerm] = joint_actions.JointVelocityAction use_default_offset: bool = True """Whether to use default joint velocities configured in the articulation asset as offset. Defaults to True. This overrides the settings from :attr:`offset` if set to True. """ @configclass class JointEffortActionCfg(JointActionCfg): """Configuration for the joint effort action term. See :class:`JointEffortAction` for more details. """ class_type: type[ActionTerm] = joint_actions.JointEffortAction ## # Gripper actions. ## @configclass class BinaryJointActionCfg(ActionTermCfg): """Configuration for the base binary joint action term. See :class:`BinaryJointAction` for more details. 
""" joint_names: list[str] = MISSING """List of joint names or regex expressions that the action will be mapped to.""" open_command_expr: dict[str, float] = MISSING """The joint command to move to *open* configuration.""" close_command_expr: dict[str, float] = MISSING """The joint command to move to *close* configuration.""" @configclass class BinaryJointPositionActionCfg(BinaryJointActionCfg): """Configuration for the binary joint position action term. See :class:`BinaryJointPositionAction` for more details. """ class_type: type[ActionTerm] = binary_joint_actions.BinaryJointPositionAction @configclass class BinaryJointVelocityActionCfg(BinaryJointActionCfg): """Configuration for the binary joint velocity action term. See :class:`BinaryJointVelocityAction` for more details. """ class_type: type[ActionTerm] = binary_joint_actions.BinaryJointVelocityAction ## # Non-holonomic actions. ## @configclass class NonHolonomicActionCfg(ActionTermCfg): """Configuration for the non-holonomic action term with dummy joints at the base. See :class:`NonHolonomicAction` for more details. """ class_type: type[ActionTerm] = non_holonomic_actions.NonHolonomicAction body_name: str = MISSING """Name of the body which has the dummy mechanism connected to.""" x_joint_name: str = MISSING """The dummy joint name in the x direction.""" y_joint_name: str = MISSING """The dummy joint name in the y direction.""" yaw_joint_name: str = MISSING """The dummy joint name in the yaw direction.""" scale: tuple[float, float] = (1.0, 1.0) """Scale factor for the action. Defaults to (1.0, 1.0).""" offset: tuple[float, float] = (0.0, 0.0) """Offset factor for the action. Defaults to (0.0, 0.0).""" ## # Task-space Actions. ## @configclass class DifferentialInverseKinematicsActionCfg(ActionTermCfg): """Configuration for inverse differential kinematics action term. See :class:`DifferentialInverseKinematicsAction` for more details. """ @configclass class OffsetCfg: """The offset pose from parent frame to child frame. On many robots, end-effector frames are fictitious frames that do not have a corresponding rigid body. In such cases, it is easier to define this transform w.r.t. their parent rigid body. For instance, for the Franka Emika arm, the end-effector is defined at an offset to the the "panda_hand" frame. """ pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation ``(w, x, y, z)`` w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" class_type: type[ActionTerm] = task_space_actions.DifferentialInverseKinematicsAction joint_names: list[str] = MISSING """List of joint names or regex expressions that the action will be mapped to.""" body_name: str = MISSING """Name of the body or frame for which IK is performed.""" body_offset: OffsetCfg | None = None """Offset of target frame w.r.t. to the body frame. Defaults to None, in which case no offset is applied.""" scale: float | tuple[float, ...] = 1.0 """Scale factor for the action. Defaults to 1.0.""" controller: DifferentialIKControllerCfg = MISSING """The configuration for the differential IK controller."""
7,163
Python
31.563636
112
0.710596
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/binary_joint_actions.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING import carb import omni.isaac.orbit.utils.string as string_utils from omni.isaac.orbit.assets.articulation import Articulation from omni.isaac.orbit.managers.action_manager import ActionTerm if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from . import actions_cfg class BinaryJointAction(ActionTerm): """Base class for binary joint actions. This action term maps a binary action to the *open* or *close* joint configurations. These configurations are specified through the :class:`BinaryJointActionCfg` object. If the input action is a float vector, the action is considered binary based on the sign of the action values. Based on above, we follow the following convention for the binary action: 1. Open action: 1 (bool) or positive values (float). 2. Close action: 0 (bool) or negative values (float). The action term can mostly be used for gripper actions, where the gripper is either open or closed. This helps in devising a mimicking mechanism for the gripper, since in simulation it is often not possible to add such constraints to the gripper. """ cfg: actions_cfg.BinaryJointActionCfg """The configuration of the action term.""" _asset: Articulation """The articulation asset on which the action term is applied.""" def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: BaseEnv) -> None: # initialize the action term super().__init__(cfg, env) # resolve the joints over which the action term is applied self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names) self._num_joints = len(self._joint_ids) # log the resolved joint names for debugging carb.log_info( f"Resolved joint names for the action term {self.__class__.__name__}:" f" {self._joint_names} [{self._joint_ids}]" ) # create tensors for raw and processed actions self._raw_actions = torch.zeros(self.num_envs, 1, device=self.device) self._processed_actions = torch.zeros(self.num_envs, self._num_joints, device=self.device) # parse open command self._open_command = torch.zeros(self._num_joints, device=self.device) index_list, name_list, value_list = string_utils.resolve_matching_names_values( self.cfg.open_command_expr, self._joint_names ) if len(index_list) != self._num_joints: raise ValueError( f"Could not resolve all joints for the action term. Missing: {set(self._joint_names) - set(name_list)}" ) self._open_command[index_list] = torch.tensor(value_list, device=self.device) # parse close command self._close_command = torch.zeros_like(self._open_command) index_list, name_list, value_list = string_utils.resolve_matching_names_values( self.cfg.close_command_expr, self._joint_names ) if len(index_list) != self._num_joints: raise ValueError( f"Could not resolve all joints for the action term. Missing: {set(self._joint_names) - set(name_list)}" ) self._close_command[index_list] = torch.tensor(value_list, device=self.device) """ Properties. """ @property def action_dim(self) -> int: return 1 @property def raw_actions(self) -> torch.Tensor: return self._raw_actions @property def processed_actions(self) -> torch.Tensor: return self._processed_actions """ Operations. 
""" def process_actions(self, actions: torch.Tensor): # store the raw actions self._raw_actions[:] = actions # compute the binary mask if actions.dtype == torch.bool: # true: close, false: open binary_mask = actions == 0 else: # true: close, false: open binary_mask = actions < 0 # compute the command self._processed_actions = torch.where(binary_mask, self._close_command, self._open_command) class BinaryJointPositionAction(BinaryJointAction): """Binary joint action that sets the binary action into joint position targets.""" cfg: actions_cfg.BinaryJointPositionActionCfg """The configuration of the action term.""" def apply_actions(self): self._asset.set_joint_position_target(self._processed_actions, joint_ids=self._joint_ids) class BinaryJointVelocityAction(BinaryJointAction): """Binary joint action that sets the binary action into joint velocity targets.""" cfg: actions_cfg.BinaryJointVelocityActionCfg """The configuration of the action term.""" def apply_actions(self): self._asset.set_joint_velocity_target(self._processed_actions, joint_ids=self._joint_ids)
5,021
Python
35.656934
119
0.667994
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/commands_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math from dataclasses import MISSING from omni.isaac.orbit.managers import CommandTermCfg from omni.isaac.orbit.utils import configclass from .null_command import NullCommand from .pose_2d_command import TerrainBasedPose2dCommand, UniformPose2dCommand from .pose_command import UniformPoseCommand from .velocity_command import NormalVelocityCommand, UniformVelocityCommand @configclass class NullCommandCfg(CommandTermCfg): """Configuration for the null command generator.""" class_type: type = NullCommand def __post_init__(self): """Post initialization.""" # set the resampling time range to infinity to avoid resampling self.resampling_time_range = (math.inf, math.inf) @configclass class UniformVelocityCommandCfg(CommandTermCfg): """Configuration for the uniform velocity command generator.""" class_type: type = UniformVelocityCommand asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" heading_command: bool = MISSING """Whether to use heading command or angular velocity command. If True, the angular velocity command is computed from the heading error, where the target heading is sampled uniformly from provided range. Otherwise, the angular velocity command is sampled uniformly from provided range. """ heading_control_stiffness: float = MISSING """Scale factor to convert the heading error to angular velocity command.""" rel_standing_envs: float = MISSING """Probability threshold for environments where the robots that are standing still.""" rel_heading_envs: float = MISSING """Probability threshold for environments where the robots follow the heading-based angular velocity command (the others follow the sampled angular velocity command).""" @configclass class Ranges: """Uniform distribution ranges for the velocity commands.""" lin_vel_x: tuple[float, float] = MISSING # min max [m/s] lin_vel_y: tuple[float, float] = MISSING # min max [m/s] ang_vel_z: tuple[float, float] = MISSING # min max [rad/s] heading: tuple[float, float] = MISSING # min max [rad] ranges: Ranges = MISSING """Distribution ranges for the velocity commands.""" @configclass class NormalVelocityCommandCfg(UniformVelocityCommandCfg): """Configuration for the normal velocity command generator.""" class_type: type = NormalVelocityCommand heading_command: bool = False # --> we don't use heading command for normal velocity command. @configclass class Ranges: """Normal distribution ranges for the velocity commands.""" mean_vel: tuple[float, float, float] = MISSING """Mean velocity for the normal distribution. The tuple contains the mean linear-x, linear-y, and angular-z velocity. """ std_vel: tuple[float, float, float] = MISSING """Standard deviation for the normal distribution. The tuple contains the standard deviation linear-x, linear-y, and angular-z velocity. """ zero_prob: tuple[float, float, float] = MISSING """Probability of zero velocity for the normal distribution. The tuple contains the probability of zero linear-x, linear-y, and angular-z velocity. 
""" ranges: Ranges = MISSING """Distribution ranges for the velocity commands.""" @configclass class UniformPoseCommandCfg(CommandTermCfg): """Configuration for uniform pose command generator.""" class_type: type = UniformPoseCommand asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" body_name: str = MISSING """Name of the body in the asset for which the commands are generated.""" @configclass class Ranges: """Uniform distribution ranges for the pose commands.""" pos_x: tuple[float, float] = MISSING # min max [m] pos_y: tuple[float, float] = MISSING # min max [m] pos_z: tuple[float, float] = MISSING # min max [m] roll: tuple[float, float] = MISSING # min max [rad] pitch: tuple[float, float] = MISSING # min max [rad] yaw: tuple[float, float] = MISSING # min max [rad] ranges: Ranges = MISSING """Ranges for the commands.""" @configclass class UniformPose2dCommandCfg(CommandTermCfg): """Configuration for the uniform 2D-pose command generator.""" class_type: type = UniformPose2dCommand asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" simple_heading: bool = MISSING """Whether to use simple heading or not. If True, the heading is in the direction of the target position. """ @configclass class Ranges: """Uniform distribution ranges for the position commands.""" pos_x: tuple[float, float] = MISSING """Range for the x position (in m).""" pos_y: tuple[float, float] = MISSING """Range for the y position (in m).""" heading: tuple[float, float] = MISSING """Heading range for the position commands (in rad). Used only if :attr:`simple_heading` is False. """ ranges: Ranges = MISSING """Distribution ranges for the position commands.""" @configclass class TerrainBasedPose2dCommandCfg(UniformPose2dCommandCfg): """Configuration for the terrain-based position command generator.""" class_type = TerrainBasedPose2dCommand @configclass class Ranges: """Uniform distribution ranges for the position commands.""" heading: tuple[float, float] = MISSING """Heading range for the position commands (in rad). Used only if :attr:`simple_heading` is False. """ ranges: Ranges = MISSING """Distribution ranges for the sampled commands."""
6,065
Python
33.465909
112
0.688541
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/pose_2d_command.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command generators for the 2D-pose for locomotion tasks.""" from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import CommandTerm from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.markers.config import GREEN_ARROW_X_MARKER_CFG from omni.isaac.orbit.terrains import TerrainImporter from omni.isaac.orbit.utils.math import quat_from_euler_xyz, quat_rotate_inverse, wrap_to_pi, yaw_quat if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from .commands_cfg import TerrainBasedPose2dCommandCfg, UniformPose2dCommandCfg class UniformPose2dCommand(CommandTerm): """Command generator that generates pose commands containing a 3-D position and heading. The command generator samples uniform 2D positions around the environment origin. It sets the height of the position command to the default root height of the robot. The heading command is either set to point towards the target or is sampled uniformly. This can be configured through the :attr:`Pose2dCommandCfg.simple_heading` parameter in the configuration. """ cfg: UniformPose2dCommandCfg """Configuration for the command generator.""" def __init__(self, cfg: UniformPose2dCommandCfg, env: BaseEnv): """Initialize the command generator class. Args: cfg: The configuration parameters for the command generator. env: The environment object. """ # initialize the base class super().__init__(cfg, env) # obtain the robot and terrain assets # -- robot self.robot: Articulation = env.scene[cfg.asset_name] # crete buffers to store the command # -- commands: (x, y, z, heading) self.pos_command_w = torch.zeros(self.num_envs, 3, device=self.device) self.heading_command_w = torch.zeros(self.num_envs, device=self.device) self.pos_command_b = torch.zeros_like(self.pos_command_w) self.heading_command_b = torch.zeros_like(self.heading_command_w) # -- metrics self.metrics["error_pos"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device) def __str__(self) -> str: msg = "PositionCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" msg += f"\tResampling time range: {self.cfg.resampling_time_range}" return msg """ Properties """ @property def command(self) -> torch.Tensor: """The desired 2D-pose in base frame. Shape is (num_envs, 4).""" return torch.cat([self.pos_command_b, self.heading_command_b.unsqueeze(1)], dim=1) """ Implementation specific functions. 
""" def _update_metrics(self): # logs data self.metrics["error_pos_2d"] = torch.norm(self.pos_command_w[:, :2] - self.robot.data.root_pos_w[:, :2], dim=1) self.metrics["error_heading"] = torch.abs(wrap_to_pi(self.heading_command_w - self.robot.data.heading_w)) def _resample_command(self, env_ids: Sequence[int]): # obtain env origins for the environments self.pos_command_w[env_ids] = self._env.scene.env_origins[env_ids] # offset the position command by the current root position r = torch.empty(len(env_ids), device=self.device) self.pos_command_w[env_ids, 0] += r.uniform_(*self.cfg.ranges.pos_x) self.pos_command_w[env_ids, 1] += r.uniform_(*self.cfg.ranges.pos_y) self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2] if self.cfg.simple_heading: # set heading command to point towards target target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids] target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0]) flipped_target_direction = wrap_to_pi(target_direction + torch.pi) # compute errors to find the closest direction to the current heading # this is done to avoid the discontinuity at the -pi/pi boundary curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs() curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs() # set the heading command to the closest direction self.heading_command_w[env_ids] = torch.where( curr_to_target < curr_to_flipped_target, target_direction, flipped_target_direction, ) else: # random heading command self.heading_command_w[env_ids] = r.uniform_(*self.cfg.ranges.heading) def _update_command(self): """Re-target the position command to the current root state.""" target_vec = self.pos_command_w - self.robot.data.root_pos_w[:, :3] self.pos_command_b[:] = quat_rotate_inverse(yaw_quat(self.robot.data.root_quat_w), target_vec) self.heading_command_b[:] = wrap_to_pi(self.heading_command_w - self.robot.data.heading_w) def _set_debug_vis_impl(self, debug_vis: bool): # create markers if necessary for the first tome if debug_vis: if not hasattr(self, "arrow_goal_visualizer"): marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy() marker_cfg.markers["arrow"].scale = (0.2, 0.2, 0.8) marker_cfg.prim_path = "/Visuals/Command/pose_goal" self.arrow_goal_visualizer = VisualizationMarkers(marker_cfg) # set their visibility to true self.arrow_goal_visualizer.set_visibility(True) else: if hasattr(self, "arrow_goal_visualizer"): self.arrow_goal_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # update the box marker self.arrow_goal_visualizer.visualize( translations=self.pos_command_w, orientations=quat_from_euler_xyz( torch.zeros_like(self.heading_command_w), torch.zeros_like(self.heading_command_w), self.heading_command_w, ), ) class TerrainBasedPose2dCommand(UniformPose2dCommand): """Command generator that generates pose commands based on the terrain. This command generator samples the position commands from the valid patches of the terrain. The heading commands are either set to point towards the target or are sampled uniformly. It expects the terrain to have a valid flat patches under the key 'target'. 
""" cfg: TerrainBasedPose2dCommandCfg """Configuration for the command generator.""" def __init__(self, cfg: TerrainBasedPose2dCommandCfg, env: BaseEnv): # initialize the base class super().__init__(cfg, env) # obtain the terrain asset self.terrain: TerrainImporter = env.scene["terrain"] # obtain the valid targets from the terrain if "target" not in self.terrain.flat_patches: raise RuntimeError( "The terrain-based command generator requires a valid flat patch under 'target' in the terrain." f" Found: {list(self.terrain.flat_patches.keys())}" ) # valid targets: (terrain_level, terrain_type, num_patches, 3) self.valid_targets: torch.Tensor = self.terrain.flat_patches["target"] def _resample_command(self, env_ids: Sequence[int]): # sample new position targets from the terrain ids = torch.randint(0, self.valid_targets.shape[2], size=(len(env_ids),), device=self.device) self.pos_command_w[env_ids] = self.valid_targets[ self.terrain.terrain_levels[env_ids], self.terrain.terrain_types[env_ids], ids ] # offset the position command by the current root height self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2] if self.cfg.simple_heading: # set heading command to point towards target target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids] target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0]) flipped_target_direction = wrap_to_pi(target_direction + torch.pi) # compute errors to find the closest direction to the current heading # this is done to avoid the discontinuity at the -pi/pi boundary curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs() curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs() # set the heading command to the closest direction self.heading_command_w[env_ids] = torch.where( curr_to_target < curr_to_flipped_target, target_direction, flipped_target_direction, ) else: # random heading command r = torch.empty(len(env_ids), device=self.device) self.heading_command_w[env_ids] = r.uniform_(*self.cfg.ranges.heading)
9,435
Python
44.365384
119
0.649921
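Illustrative note (not part of the source files above): the "simple heading" branch in ``UniformPose2dCommand._resample_command`` keeps whichever of the target direction or its 180-degree flip is closer to the robot's current heading, which avoids the discontinuity at the -pi/pi boundary. A minimal, self-contained sketch of that selection, with ``wrap_to_pi`` re-implemented locally and all values made up:

.. code-block:: python

    import torch

    def wrap_to_pi(angle: torch.Tensor) -> torch.Tensor:
        # wrap angles into [-pi, pi)
        return (angle + torch.pi) % (2 * torch.pi) - torch.pi

    # hypothetical target offsets (world frame) and current headings
    target_vec = torch.tensor([[2.0, 1.0], [-1.0, 0.5]])
    heading_w = torch.tensor([0.1, 3.0])

    # direction towards the target and its 180-degree flip
    target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0])
    flipped_direction = wrap_to_pi(target_direction + torch.pi)

    # keep the direction that requires the smaller turn from the current heading
    err_target = wrap_to_pi(target_direction - heading_w).abs()
    err_flipped = wrap_to_pi(flipped_direction - heading_w).abs()
    heading_command = torch.where(err_target < err_flipped, target_direction, flipped_direction)
    print(heading_command)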
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/velocity_command.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command generators for the velocity-based locomotion task.""" from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import CommandTerm from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.markers.config import BLUE_ARROW_X_MARKER_CFG, GREEN_ARROW_X_MARKER_CFG if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from .commands_cfg import NormalVelocityCommandCfg, UniformVelocityCommandCfg class UniformVelocityCommand(CommandTerm): r"""Command generator that generates a velocity command in SE(2) from uniform distribution. The command comprises of a linear velocity in x and y direction and an angular velocity around the z-axis. It is given in the robot's base frame. If the :attr:`cfg.heading_command` flag is set to True, the angular velocity is computed from the heading error similar to doing a proportional control on the heading error. The target heading is sampled uniformly from the provided range. Otherwise, the angular velocity is sampled uniformly from the provided range. Mathematically, the angular velocity is computed as follows from the heading command: .. math:: \omega_z = \frac{1}{2} \text{wrap_to_pi}(\theta_{\text{target}} - \theta_{\text{current}}) """ cfg: UniformVelocityCommandCfg """The configuration of the command generator.""" def __init__(self, cfg: UniformVelocityCommandCfg, env: BaseEnv): """Initialize the command generator. Args: cfg: The configuration of the command generator. env: The environment. """ # initialize the base class super().__init__(cfg, env) # obtain the robot asset # -- robot self.robot: Articulation = env.scene[cfg.asset_name] # crete buffers to store the command # -- command: x vel, y vel, yaw vel, heading self.vel_command_b = torch.zeros(self.num_envs, 3, device=self.device) self.heading_target = torch.zeros(self.num_envs, device=self.device) self.is_heading_env = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.is_standing_env = torch.zeros_like(self.is_heading_env) # -- metrics self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device) def __str__(self) -> str: """Return a string representation of the command generator.""" msg = "UniformVelocityCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n" msg += f"\tHeading command: {self.cfg.heading_command}\n" if self.cfg.heading_command: msg += f"\tHeading probability: {self.cfg.rel_heading_envs}\n" msg += f"\tStanding probability: {self.cfg.rel_standing_envs}" return msg """ Properties """ @property def command(self) -> torch.Tensor: """The desired base velocity command in the base frame. Shape is (num_envs, 3).""" return self.vel_command_b """ Implementation specific functions. 
""" def _update_metrics(self): # time for which the command was executed max_command_time = self.cfg.resampling_time_range[1] max_command_step = max_command_time / self._env.step_dt # logs data self.metrics["error_vel_xy"] += ( torch.norm(self.vel_command_b[:, :2] - self.robot.data.root_lin_vel_b[:, :2], dim=-1) / max_command_step ) self.metrics["error_vel_yaw"] += ( torch.abs(self.vel_command_b[:, 2] - self.robot.data.root_ang_vel_b[:, 2]) / max_command_step ) def _resample_command(self, env_ids: Sequence[int]): # sample velocity commands r = torch.empty(len(env_ids), device=self.device) # -- linear velocity - x direction self.vel_command_b[env_ids, 0] = r.uniform_(*self.cfg.ranges.lin_vel_x) # -- linear velocity - y direction self.vel_command_b[env_ids, 1] = r.uniform_(*self.cfg.ranges.lin_vel_y) # -- ang vel yaw - rotation around z self.vel_command_b[env_ids, 2] = r.uniform_(*self.cfg.ranges.ang_vel_z) # heading target if self.cfg.heading_command: self.heading_target[env_ids] = r.uniform_(*self.cfg.ranges.heading) # update heading envs self.is_heading_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_heading_envs # update standing envs self.is_standing_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_standing_envs def _update_command(self): """Post-processes the velocity command. This function sets velocity command to zero for standing environments and computes angular velocity from heading direction if the heading_command flag is set. """ # Compute angular velocity from heading direction if self.cfg.heading_command: # resolve indices of heading envs env_ids = self.is_heading_env.nonzero(as_tuple=False).flatten() # compute angular velocity heading_error = math_utils.wrap_to_pi(self.heading_target[env_ids] - self.robot.data.heading_w[env_ids]) self.vel_command_b[env_ids, 2] = torch.clip( self.cfg.heading_control_stiffness * heading_error, min=self.cfg.ranges.ang_vel_z[0], max=self.cfg.ranges.ang_vel_z[1], ) # Enforce standing (i.e., zero velocity command) for standing envs # TODO: check if conversion is needed standing_env_ids = self.is_standing_env.nonzero(as_tuple=False).flatten() self.vel_command_b[standing_env_ids, :] = 0.0 def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers # note: parent only deals with callbacks. 
not their visibility if debug_vis: # create markers if necessary for the first tome if not hasattr(self, "base_vel_goal_visualizer"): # -- goal marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy() marker_cfg.prim_path = "/Visuals/Command/velocity_goal" marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5) self.base_vel_goal_visualizer = VisualizationMarkers(marker_cfg) # -- current marker_cfg = BLUE_ARROW_X_MARKER_CFG.copy() marker_cfg.prim_path = "/Visuals/Command/velocity_current" marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5) self.base_vel_visualizer = VisualizationMarkers(marker_cfg) # set their visibility to true self.base_vel_goal_visualizer.set_visibility(True) self.base_vel_visualizer.set_visibility(True) else: if hasattr(self, "base_vel_goal_visualizer"): self.base_vel_goal_visualizer.set_visibility(False) self.base_vel_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # get marker location # -- base state base_pos_w = self.robot.data.root_pos_w.clone() base_pos_w[:, 2] += 0.5 # -- resolve the scales and quaternions vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.command[:, :2]) vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2]) # display markers self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale) self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale) """ Internal helpers. """ def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Converts the XY base velocity command to arrow direction rotation.""" # obtain default scale of the marker default_scale = self.base_vel_goal_visualizer.cfg.markers["arrow"].scale # arrow-scale arrow_scale = torch.tensor(default_scale, device=self.device).repeat(xy_velocity.shape[0], 1) arrow_scale[:, 0] *= torch.linalg.norm(xy_velocity, dim=1) * 3.0 # arrow-direction heading_angle = torch.atan2(xy_velocity[:, 1], xy_velocity[:, 0]) zeros = torch.zeros_like(heading_angle) arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle) # convert everything back from base to world frame base_quat_w = self.robot.data.root_quat_w arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat) return arrow_scale, arrow_quat class NormalVelocityCommand(UniformVelocityCommand): """Command generator that generates a velocity command in SE(2) from a normal distribution. The command comprises of a linear velocity in x and y direction and an angular velocity around the z-axis. It is given in the robot's base frame. The command is sampled from a normal distribution with mean and standard deviation specified in the configuration. With equal probability, the sign of the individual components is flipped. """ cfg: NormalVelocityCommandCfg """The command generator configuration.""" def __init__(self, cfg: NormalVelocityCommandCfg, env: object): """Initializes the command generator. Args: cfg: The command generator configuration. env: The environment. 
""" super().__init__(self, cfg, env) # create buffers for zero commands envs self.is_zero_vel_x_env = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.is_zero_vel_y_env = torch.zeros_like(self.is_zero_vel_x_env) self.is_zero_vel_yaw_env = torch.zeros_like(self.is_zero_vel_x_env) def __str__(self) -> str: """Return a string representation of the command generator.""" msg = "NormalVelocityCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n" msg += f"\tStanding probability: {self.cfg.rel_standing_envs}" return msg def _resample_command(self, env_ids): # sample velocity commands r = torch.empty(len(env_ids), device=self.device) # -- linear velocity - x direction self.vel_command_b[env_ids, 0] = r.normal_(mean=self.cfg.ranges.mean_vel[0], std=self.cfg.ranges.std_vel[0]) self.vel_command_b[env_ids, 0] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0) # -- linear velocity - y direction self.vel_command_b[env_ids, 1] = r.normal_(mean=self.cfg.ranges.mean_vel[1], std=self.cfg.ranges.std_vel[1]) self.vel_command_b[env_ids, 1] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0) # -- angular velocity - yaw direction self.vel_command_b[env_ids, 2] = r.normal_(mean=self.cfg.ranges.mean_vel[2], std=self.cfg.ranges.std_vel[2]) self.vel_command_b[env_ids, 2] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0) # update element wise zero velocity command # TODO what is zero prob ? self.is_zero_vel_x_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[0] self.is_zero_vel_y_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[1] self.is_zero_vel_yaw_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[2] # update standing envs self.is_standing_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_standing_envs def _update_command(self): """Sets velocity command to zero for standing envs.""" # Enforce standing (i.e., zero velocity command) for standing envs standing_env_ids = self.is_standing_env.nonzero(as_tuple=False).flatten() # TODO check if conversion is needed self.vel_command_b[standing_env_ids, :] = 0.0 # Enforce zero velocity for individual elements # TODO: check if conversion is needed zero_vel_x_env_ids = self.is_zero_vel_x_env.nonzero(as_tuple=False).flatten() zero_vel_y_env_ids = self.is_zero_vel_y_env.nonzero(as_tuple=False).flatten() zero_vel_yaw_env_ids = self.is_zero_vel_yaw_env.nonzero(as_tuple=False).flatten() self.vel_command_b[zero_vel_x_env_ids, 0] = 0.0 self.vel_command_b[zero_vel_y_env_ids, 1] = 0.0 self.vel_command_b[zero_vel_yaw_env_ids, 2] = 0.0
12,959
Python
46.29927
119
0.641408
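Illustrative note (not part of the source files above): when ``heading_command`` is enabled, ``UniformVelocityCommand`` turns the heading error into a yaw-rate command through a clipped proportional law, as described in its docstring. A small self-contained sketch with placeholder values standing in for ``heading_control_stiffness`` and ``ranges.ang_vel_z``:

.. code-block:: python

    import torch

    def wrap_to_pi(angle: torch.Tensor) -> torch.Tensor:
        return (angle + torch.pi) % (2 * torch.pi) - torch.pi

    heading_control_stiffness = 0.5  # placeholder gain
    ang_vel_z_limits = (-1.0, 1.0)   # placeholder sampling range for the yaw rate

    heading_target = torch.tensor([1.2, -2.8])
    heading_current = torch.tensor([-0.3, 2.9])

    # proportional control on the wrapped heading error, clipped to the yaw-rate range
    heading_error = wrap_to_pi(heading_target - heading_current)
    ang_vel_z = torch.clip(
        heading_control_stiffness * heading_error,
        min=ang_vel_z_limits[0],
        max=ang_vel_z_limits[1],
    )
    print(ang_vel_z)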
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/pose_command.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command generators for pose tracking.""" from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import CommandTerm from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG from omni.isaac.orbit.utils.math import combine_frame_transforms, compute_pose_error, quat_from_euler_xyz if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv from .commands_cfg import UniformPoseCommandCfg class UniformPoseCommand(CommandTerm): """Command generator for generating pose commands uniformly. The command generator generates poses by sampling positions uniformly within specified regions in cartesian space. For orientation, it samples uniformly the euler angles (roll-pitch-yaw) and converts them into quaternion representation (w, x, y, z). The position and orientation commands are generated in the base frame of the robot, and not the simulation world frame. This means that users need to handle the transformation from the base frame to the simulation world frame themselves. .. caution:: Sampling orientations uniformly is not strictly the same as sampling euler angles uniformly. This is because rotations are defined by 3D non-Euclidean space, and the mapping from euler angles to rotations is not one-to-one. """ cfg: UniformPoseCommandCfg """Configuration for the command generator.""" def __init__(self, cfg: UniformPoseCommandCfg, env: BaseEnv): """Initialize the command generator class. Args: cfg: The configuration parameters for the command generator. env: The environment object. """ # initialize the base class super().__init__(cfg, env) # extract the robot and body index for which the command is generated self.robot: Articulation = env.scene[cfg.asset_name] self.body_idx = self.robot.find_bodies(cfg.body_name)[0][0] # create buffers # -- commands: (x, y, z, qw, qx, qy, qz) in root frame self.pose_command_b = torch.zeros(self.num_envs, 7, device=self.device) self.pose_command_b[:, 3] = 1.0 self.pose_command_w = torch.zeros_like(self.pose_command_b) # -- metrics self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) def __str__(self) -> str: msg = "UniformPoseCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n" return msg """ Properties """ @property def command(self) -> torch.Tensor: """The desired pose command. Shape is (num_envs, 7). The first three elements correspond to the position, followed by the quaternion orientation in (w, x, y, z). """ return self.pose_command_b """ Implementation specific functions. 
""" def _update_metrics(self): # transform command from base frame to simulation world frame self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms( self.robot.data.root_pos_w, self.robot.data.root_quat_w, self.pose_command_b[:, :3], self.pose_command_b[:, 3:], ) # compute the error pos_error, rot_error = compute_pose_error( self.pose_command_w[:, :3], self.pose_command_w[:, 3:], self.robot.data.body_state_w[:, self.body_idx, :3], self.robot.data.body_state_w[:, self.body_idx, 3:7], ) self.metrics["position_error"] = torch.norm(pos_error, dim=-1) self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1) def _resample_command(self, env_ids: Sequence[int]): # sample new pose targets # -- position r = torch.empty(len(env_ids), device=self.device) self.pose_command_b[env_ids, 0] = r.uniform_(*self.cfg.ranges.pos_x) self.pose_command_b[env_ids, 1] = r.uniform_(*self.cfg.ranges.pos_y) self.pose_command_b[env_ids, 2] = r.uniform_(*self.cfg.ranges.pos_z) # -- orientation euler_angles = torch.zeros_like(self.pose_command_b[env_ids, :3]) euler_angles[:, 0].uniform_(*self.cfg.ranges.roll) euler_angles[:, 1].uniform_(*self.cfg.ranges.pitch) euler_angles[:, 2].uniform_(*self.cfg.ranges.yaw) self.pose_command_b[env_ids, 3:] = quat_from_euler_xyz( euler_angles[:, 0], euler_angles[:, 1], euler_angles[:, 2] ) def _update_command(self): pass def _set_debug_vis_impl(self, debug_vis: bool): # create markers if necessary for the first tome if debug_vis: if not hasattr(self, "goal_pose_visualizer"): marker_cfg = FRAME_MARKER_CFG.copy() marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) # -- goal pose marker_cfg.prim_path = "/Visuals/Command/goal_pose" self.goal_pose_visualizer = VisualizationMarkers(marker_cfg) # -- current body pose marker_cfg.prim_path = "/Visuals/Command/body_pose" self.body_pose_visualizer = VisualizationMarkers(marker_cfg) # set their visibility to true self.goal_pose_visualizer.set_visibility(True) self.body_pose_visualizer.set_visibility(True) else: if hasattr(self, "goal_pose_visualizer"): self.goal_pose_visualizer.set_visibility(False) self.body_pose_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # update the markers # -- goal pose self.goal_pose_visualizer.visualize(self.pose_command_w[:, :3], self.pose_command_w[:, 3:]) # -- current body pose body_pose_w = self.robot.data.body_state_w[:, self.body_idx] self.body_pose_visualizer.visualize(body_pose_w[:, :3], body_pose_w[:, 3:7])
6,437
Python
40.006369
116
0.63803
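Illustrative note (not part of the source files above): ``UniformPoseCommand`` produces a 7-dimensional command, a position followed by a (w, x, y, z) quaternion, expressed in the robot's base frame. The sketch below samples such a command using a local roll-pitch-yaw to quaternion conversion; the exact Euler convention of the extension's ``quat_from_euler_xyz`` helper is assumed rather than verified, and all ranges are placeholders:

.. code-block:: python

    import torch

    def quat_from_rpy(roll, pitch, yaw):
        # quaternion (w, x, y, z) for a yaw-pitch-roll rotation (assumed convention)
        cr, sr = torch.cos(roll * 0.5), torch.sin(roll * 0.5)
        cp, sp = torch.cos(pitch * 0.5), torch.sin(pitch * 0.5)
        cy, sy = torch.cos(yaw * 0.5), torch.sin(yaw * 0.5)
        w = cr * cp * cy + sr * sp * sy
        x = sr * cp * cy - cr * sp * sy
        y = cr * sp * cy + sr * cp * sy
        z = cr * cp * sy - sr * sp * cy
        return torch.stack([w, x, y, z], dim=-1)

    num_envs = 4
    pos_lo = torch.tensor([0.3, -0.2, 0.2])  # placeholder position ranges
    pos_hi = torch.tensor([0.6, 0.2, 0.5])
    rpy_lo, rpy_hi = -torch.pi / 6, torch.pi / 6  # placeholder orientation ranges

    position = pos_lo + (pos_hi - pos_lo) * torch.rand(num_envs, 3)
    rpy = rpy_lo + (rpy_hi - rpy_lo) * torch.rand(num_envs, 3)
    quat = quat_from_rpy(rpy[:, 0], rpy[:, 1], rpy[:, 2])

    pose_command_b = torch.cat([position, quat], dim=-1)
    print(pose_command_b.shape)  # torch.Size([4, 7])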
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/null_command.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing command generator that does nothing.""" from __future__ import annotations from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.orbit.managers import CommandTerm if TYPE_CHECKING: from .commands_cfg import NullCommandCfg class NullCommand(CommandTerm): """Command generator that does nothing. This command generator does not generate any commands. It is used for environments that do not require any commands. """ cfg: NullCommandCfg """Configuration for the command generator.""" def __str__(self) -> str: msg = "NullCommand:\n" msg += "\tCommand dimension: N/A\n" msg += f"\tResampling time range: {self.cfg.resampling_time_range}" return msg """ Properties """ @property def command(self): """Null command. Raises: RuntimeError: No command is generated. Always raises this error. """ raise RuntimeError("NullCommandTerm does not generate any commands.") """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: return {} def compute(self, dt: float): pass """ Implementation specific functions. """ def _update_metrics(self): pass def _resample_command(self, env_ids: Sequence[int]): pass def _update_command(self): pass
1,574
Python
21.5
98
0.640407
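Illustrative note (not part of the source files above): accessing ``NullCommand.command`` raises by design, so downstream code should not query commands from this term. A tiny self-contained sketch of that behaviour using a stand-in class:

.. code-block:: python

    class _NullLike:
        """Stand-in mimicking NullCommand.command, for illustration only."""

        @property
        def command(self):
            raise RuntimeError("NullCommandTerm does not generate any commands.")

    null_term = _NullLike()
    try:
        _ = null_term.command
    except RuntimeError as err:
        print(f"no command available: {err}")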
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Various command terms that can be used in the environment.""" from .commands_cfg import ( NormalVelocityCommandCfg, NullCommandCfg, TerrainBasedPose2dCommandCfg, UniformPose2dCommandCfg, UniformPoseCommandCfg, UniformVelocityCommandCfg, ) from .null_command import NullCommand from .pose_2d_command import TerrainBasedPose2dCommand, UniformPose2dCommand from .pose_command import UniformPoseCommand from .velocity_command import NormalVelocityCommand, UniformVelocityCommand
626
Python
30.349998
76
0.801917
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/observation_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Observation manager for computing observation signals for a given world.""" from __future__ import annotations import torch from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import ObservationGroupCfg, ObservationTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv class ObservationManager(ManagerBase): """Manager for computing observation signals for a given world. Observations are organized into groups based on their intended usage. This allows having different observation groups for different types of learning such as asymmetric actor-critic and student-teacher training. Each group contains observation terms which contain information about the observation function to call, the noise corruption model to use, and the sensor to retrieve data from. Each observation group should inherit from the :class:`ObservationGroupCfg` class. Within each group, each observation term should instantiate the :class:`ObservationTermCfg` class. """ def __init__(self, cfg: object, env: BaseEnv): """Initialize observation manager. Args: cfg: The configuration object or dictionary (``dict[str, ObservationGroupCfg]``). env: The environment instance. """ super().__init__(cfg, env) # compute combined vector for obs group self._group_obs_dim: dict[str, tuple[int, ...]] = dict() for group_name, group_term_dims in self._group_obs_term_dim.items(): term_dims = [torch.tensor(dims, device="cpu") for dims in group_term_dims] self._group_obs_dim[group_name] = tuple(torch.sum(torch.stack(term_dims, dim=0), dim=0).tolist()) def __str__(self) -> str: """Returns: A string representation for the observation manager.""" msg = f"<ObservationManager> contains {len(self._group_obs_term_names)} groups.\n" # add info for each group for group_name, group_dim in self._group_obs_dim.items(): # create table for term information table = PrettyTable() table.title = f"Active Observation Terms in Group: '{group_name}' (shape: {group_dim})" table.field_names = ["Index", "Name", "Shape"] # set alignment of table columns table.align["Name"] = "l" # add info for each term obs_terms = zip( self._group_obs_term_names[group_name], self._group_obs_term_dim[group_name], ) for index, (name, dims) in enumerate(obs_terms): # resolve inputs to simplify prints tab_dims = tuple(dims) # add row table.add_row([index, name, tab_dims]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def active_terms(self) -> dict[str, list[str]]: """Name of active observation terms in each group.""" return self._group_obs_term_names @property def group_obs_dim(self) -> dict[str, tuple[int, ...]]: """Shape of observation tensor in each group.""" return self._group_obs_dim @property def group_obs_term_dim(self) -> dict[str, list[tuple[int, ...]]]: """Shape of observation tensor for each term in each group.""" return self._group_obs_term_dim @property def group_obs_concatenate(self) -> dict[str, bool]: """Whether the observation terms are concatenated in each group.""" return self._group_obs_concatenate """ Operations. 
""" def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: # call all terms that are classes for group_cfg in self._group_obs_class_term_cfgs.values(): for term_cfg in group_cfg: term_cfg.func.reset(env_ids=env_ids) # nothing to log here return {} def compute(self) -> dict[str, torch.Tensor | dict[str, torch.Tensor]]: """Compute the observations per group for all groups. The method computes the observations for all the groups handled by the observation manager. Please check the :meth:`compute_group` on the processing of observations per group. Returns: A dictionary with keys as the group names and values as the computed observations. """ # create a buffer for storing obs from all the groups obs_buffer = dict() # iterate over all the terms in each group for group_name in self._group_obs_term_names: obs_buffer[group_name] = self.compute_group(group_name) # otherwise return a dict with observations of all groups return obs_buffer def compute_group(self, group_name: str) -> torch.Tensor | dict[str, torch.Tensor]: """Computes the observations for a given group. The observations for a given group are computed by calling the registered functions for each term in the group. The functions are called in the order of the terms in the group. The functions are expected to return a tensor with shape (num_envs, ...). If a corruption/noise model is registered for a term, the function is called to corrupt the observation. The corruption function is expected to return a tensor with the same shape as the observation. The observations are clipped and scaled as per the configuration settings. The operations are performed in the order: compute, add corruption/noise, clip, scale. By default, no scaling or clipping is applied. Args: group_name: The name of the group for which to compute the observations. Defaults to None, in which case observations for all the groups are computed and returned. Returns: Depending on the group's configuration, the tensors for individual observation terms are concatenated along the last dimension into a single tensor. Otherwise, they are returned as a dictionary with keys corresponding to the term's name. Raises: ValueError: If input ``group_name`` is not a valid group handled by the manager. """ # check ig group name is valid if group_name not in self._group_obs_term_names: raise ValueError( f"Unable to find the group '{group_name}' in the observation manager." f" Available groups are: {list(self._group_obs_term_names.keys())}" ) # iterate over all the terms in each group group_term_names = self._group_obs_term_names[group_name] # buffer to store obs per group group_obs = dict.fromkeys(group_term_names, None) # read attributes for each term obs_terms = zip(group_term_names, self._group_obs_term_cfgs[group_name]) # evaluate terms: compute, add noise, clip, scale. for name, term_cfg in obs_terms: # compute term's value obs: torch.Tensor = term_cfg.func(self._env, **term_cfg.params).clone() # apply post-processing if term_cfg.noise: obs = term_cfg.noise.func(obs, term_cfg.noise) if term_cfg.clip: obs = obs.clip_(min=term_cfg.clip[0], max=term_cfg.clip[1]) if term_cfg.scale: obs = obs.mul_(term_cfg.scale) # TODO: Introduce delay and filtering models. # Ref: https://robosuite.ai/docs/modules/sensors.html#observables # add value to list group_obs[name] = obs # concatenate all observations in the group together if self._group_obs_concatenate[group_name]: return torch.cat(list(group_obs.values()), dim=-1) else: return group_obs """ Helper functions. 
""" def _prepare_terms(self): """Prepares a list of observation terms functions.""" # create buffers to store information for each observation group # TODO: Make this more convenient by using data structures. self._group_obs_term_names: dict[str, list[str]] = dict() self._group_obs_term_dim: dict[str, list[int]] = dict() self._group_obs_term_cfgs: dict[str, list[ObservationTermCfg]] = dict() self._group_obs_class_term_cfgs: dict[str, list[ObservationTermCfg]] = dict() self._group_obs_concatenate: dict[str, bool] = dict() # check if config is dict already if isinstance(self.cfg, dict): group_cfg_items = self.cfg.items() else: group_cfg_items = self.cfg.__dict__.items() # iterate over all the groups for group_name, group_cfg in group_cfg_items: # check for non config if group_cfg is None: continue # check if the term is a curriculum term if not isinstance(group_cfg, ObservationGroupCfg): raise TypeError( f"Observation group '{group_name}' is not of type 'ObservationGroupCfg'." f" Received: '{type(group_cfg)}'." ) # initialize list for the group settings self._group_obs_term_names[group_name] = list() self._group_obs_term_dim[group_name] = list() self._group_obs_term_cfgs[group_name] = list() self._group_obs_class_term_cfgs[group_name] = list() # read common config for the group self._group_obs_concatenate[group_name] = group_cfg.concatenate_terms # check if config is dict already if isinstance(group_cfg, dict): group_cfg_items = group_cfg.items() else: group_cfg_items = group_cfg.__dict__.items() # iterate over all the terms in each group for term_name, term_cfg in group_cfg.__dict__.items(): # skip non-obs settings if term_name in ["enable_corruption", "concatenate_terms"]: continue # check for non config if term_cfg is None: continue if not isinstance(term_cfg, ObservationTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type ObservationTermCfg." f" Received: '{type(term_cfg)}'." ) # resolve common terms in the config self._resolve_common_term_cfg(f"{group_name}/{term_name}", term_cfg, min_argc=1) # check noise settings if not group_cfg.enable_corruption: term_cfg.noise = None # add term config to list to list self._group_obs_term_names[group_name].append(term_name) self._group_obs_term_cfgs[group_name].append(term_cfg) # call function the first time to fill up dimensions obs_dims = tuple(term_cfg.func(self._env, **term_cfg.params).shape[1:]) self._group_obs_term_dim[group_name].append(obs_dims) # add term in a separate list if term is a class if isinstance(term_cfg.func, ManagerTermBase): self._group_obs_class_term_cfgs[group_name].append(term_cfg) # call reset (in-case above call to get obs dims changed the state) term_cfg.func.reset()
11,820
Python
44.291188
114
0.609645
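Illustrative note (not part of the source files above): ``compute_group`` applies each term's post-processing in a fixed order, compute, corrupt with noise, clip, then scale, and optionally concatenates the terms of a group along the last dimension. A compact sketch with made-up noise, clip, and scale settings:

.. code-block:: python

    import torch

    def post_process(obs: torch.Tensor, noise_std: float, clip: tuple, scale: float) -> torch.Tensor:
        # same order as the observation manager: noise -> clip -> scale
        obs = obs + noise_std * torch.randn_like(obs)
        obs = obs.clip_(min=clip[0], max=clip[1])
        obs = obs.mul_(scale)
        return obs

    raw_obs = torch.randn(8, 12)  # (num_envs, obs_dim), illustrative
    processed = post_process(raw_obs.clone(), noise_std=0.05, clip=(-5.0, 5.0), scale=0.5)

    # terms of a group are concatenated along the last dimension when configured to do so
    group_obs = torch.cat([processed, torch.zeros(8, 3)], dim=-1)
    print(group_obs.shape)  # torch.Size([8, 15])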
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/manager_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import copy import inspect from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any import carb import omni.isaac.orbit.utils.string as string_utils from omni.isaac.orbit.utils import string_to_callable from .manager_term_cfg import ManagerTermBaseCfg from .scene_entity_cfg import SceneEntityCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv class ManagerTermBase(ABC): """Base class for manager terms. Manager term implementations can be functions or classes. If the term is a class, it should inherit from this base class and implement the required methods. Each manager is implemented as a class that inherits from the :class:`ManagerBase` class. Each manager class should also have a corresponding configuration class that defines the configuration terms for the manager. Each term should the :class:`ManagerTermBaseCfg` class or its subclass. Example pseudo-code for creating a manager: .. code-block:: python from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.mdp import ManagerBase, ManagerTermBaseCfg @configclass class MyManagerCfg: my_term_1: ManagerTermBaseCfg = ManagerTermBaseCfg(...) my_term_2: ManagerTermBaseCfg = ManagerTermBaseCfg(...) my_term_3: ManagerTermBaseCfg = ManagerTermBaseCfg(...) # define manager instance my_manager = ManagerBase(cfg=ManagerCfg(), env=env) """ def __init__(self, cfg: ManagerTermBaseCfg, env: BaseEnv): """Initialize the manager term. Args: cfg: The configuration object. env: The environment instance. """ # store the inputs self.cfg = cfg self._env = env """ Properties. """ @property def num_envs(self) -> int: """Number of environments.""" return self._env.num_envs @property def device(self) -> str: """Device on which to perform computations.""" return self._env.device """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> None: """Resets the manager term. Args: env_ids: The environment ids. Defaults to None, in which case all environments are considered. """ pass def __call__(self, *args) -> Any: """Returns the value of the term required by the manager. In case of a class implementation, this function is called by the manager to get the value of the term. The arguments passed to this function are the ones specified in the term configuration (see :attr:`ManagerTermBaseCfg.params`). .. attention:: To be consistent with memory-less implementation of terms with functions, it is recommended to ensure that the returned mutable quantities are cloned before returning them. For instance, if the term returns a tensor, it is recommended to ensure that the returned tensor is a clone of the original tensor. This prevents the manager from storing references to the tensors and altering the original tensors. Args: *args: Variable length argument list. Returns: The value of the term. """ raise NotImplementedError class ManagerBase(ABC): """Base class for all managers.""" def __init__(self, cfg: object, env: BaseEnv): """Initialize the manager. Args: cfg: The configuration object. env: The environment instance. """ # store the inputs self.cfg = copy.deepcopy(cfg) self._env = env # parse config to create terms information self._prepare_terms() """ Properties. 
""" @property def num_envs(self) -> int: """Number of environments.""" return self._env.num_envs @property def device(self) -> str: """Device on which to perform computations.""" return self._env.device @property @abstractmethod def active_terms(self) -> list[str] | dict[str, list[str]]: """Name of active terms.""" raise NotImplementedError """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: """Resets the manager and returns logging information for the current time-step. Args: env_ids: The environment ids for which to log data. Defaults None, which logs data for all environments. Returns: Dictionary containing the logging information. """ return {} def find_terms(self, name_keys: str | Sequence[str]) -> list[str]: """Find terms in the manager based on the names. This function searches the manager for terms based on the names. The names can be specified as regular expressions or a list of regular expressions. The search is performed on the active terms in the manager. Please check the :meth:`omni.isaac.orbit.utils.string_utils.resolve_matching_names` function for more information on the name matching. Args: name_keys: A regular expression or a list of regular expressions to match the term names. Returns: A list of term names that match the input keys. """ # resolve search keys if isinstance(self.active_terms, dict): list_of_strings = [] for names in self.active_terms.values(): list_of_strings.extend(names) else: list_of_strings = self.active_terms # return the matching names return string_utils.resolve_matching_names(name_keys, list_of_strings)[1] """ Implementation specific. """ @abstractmethod def _prepare_terms(self): """Prepare terms information from the configuration object.""" raise NotImplementedError """ Helper functions. """ def _resolve_common_term_cfg(self, term_name: str, term_cfg: ManagerTermBaseCfg, min_argc: int = 1): """Resolve common term configuration. Usually, called by the :meth:`_prepare_terms` method to resolve common term configuration. Note: By default, all term functions are expected to have at least one argument, which is the environment object. Some other managers may expect functions to take more arguments, for instance, the environment indices as the second argument. In such cases, the ``min_argc`` argument can be used to specify the minimum number of arguments required by the term function to be called correctly by the manager. Args: term_name: The name of the term. term_cfg: The term configuration. min_argc: The minimum number of arguments required by the term function to be called correctly by the manager. Raises: TypeError: If the term configuration is not of type :class:`ManagerTermBaseCfg`. ValueError: If the scene entity defined in the term configuration does not exist. AttributeError: If the term function is not callable. ValueError: If the term function's arguments are not matched by the parameters. """ # check if the term is a valid term config if not isinstance(term_cfg, ManagerTermBaseCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type ManagerTermBaseCfg." f" Received: '{type(term_cfg)}'." ) # iterate over all the entities and parse the joint and body names for key, value in term_cfg.params.items(): # deal with string if isinstance(value, SceneEntityCfg): # load the entity try: value.resolve(self._env.scene) except ValueError as e: raise ValueError(f"Error while parsing '{term_name}:{key}'. 
{e}") # log the entity for checking later msg = f"[{term_cfg.__class__.__name__}:{term_name}] Found entity '{value.name}'." if value.joint_ids is not None: msg += f"\n\tJoint names: {value.joint_names} [{value.joint_ids}]" if value.body_ids is not None: msg += f"\n\tBody names: {value.body_names} [{value.body_ids}]" # print the information carb.log_info(msg) # store the entity term_cfg.params[key] = value # get the corresponding function or functional class if isinstance(term_cfg.func, str): term_cfg.func = string_to_callable(term_cfg.func) # initialize the term if it is a class if inspect.isclass(term_cfg.func): if not issubclass(term_cfg.func, ManagerTermBase): raise TypeError( f"Configuration for the term '{term_name}' is not of type ManagerTermBase." f" Received: '{type(term_cfg.func)}'." ) term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env) # check if function is callable if not callable(term_cfg.func): raise AttributeError(f"The term '{term_name}' is not callable. Received: {term_cfg.func}") # check if term's arguments are matched by params term_params = list(term_cfg.params.keys()) args = inspect.signature(term_cfg.func).parameters args_with_defaults = [arg for arg in args if args[arg].default is not inspect.Parameter.empty] args_without_defaults = [arg for arg in args if args[arg].default is inspect.Parameter.empty] args = args_without_defaults + args_with_defaults # ignore first two arguments for env and env_ids # Think: Check for cases when kwargs are set inside the function? if len(args) > min_argc: if set(args[min_argc:]) != set(term_params + args_with_defaults): raise ValueError( f"The term '{term_name}' expects mandatory parameters: {args_without_defaults[min_argc:]}" f" and optional parameters: {args_with_defaults}, but received: {term_params}." )
10,629
Python
36.167832
110
0.621884
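Illustrative note (not part of the source files above): ``_resolve_common_term_cfg`` validates a term function by splitting its signature into mandatory and optional arguments and checking that the configured ``params`` cover exactly the mandatory ones beyond the leading environment argument. A self-contained sketch of that check with a made-up term function:

.. code-block:: python

    import inspect

    def example_term(env, threshold, asset_cfg=None):
        # made-up term function; the first argument is always the environment
        return threshold

    params = {"threshold": 0.5}  # illustrative term_cfg.params
    min_argc = 1                 # leading arguments supplied by the manager (here: env)

    sig_params = inspect.signature(example_term).parameters
    without_defaults = [n for n, p in sig_params.items() if p.default is inspect.Parameter.empty]
    with_defaults = [n for n, p in sig_params.items() if p.default is not inspect.Parameter.empty]
    args = without_defaults + with_defaults

    if len(args) > min_argc and set(args[min_argc:]) != set(list(params) + with_defaults):
        raise ValueError(
            f"expected mandatory parameters {without_defaults[min_argc:]} and optional"
            f" parameters {with_defaults}, but received {list(params)}"
        )
    print("parameters match")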
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for environment managers. The managers are used to handle various aspects of the environment such as randomization events, curriculum, and observations. Each manager implements a specific functionality for the environment. The managers are designed to be modular and can be easily extended to support new functionality. """ from .action_manager import ActionManager, ActionTerm from .command_manager import CommandManager, CommandTerm from .curriculum_manager import CurriculumManager from .event_manager import EventManager, RandomizationManager from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import ( ActionTermCfg, CommandTermCfg, CurriculumTermCfg, EventTermCfg, ManagerTermBaseCfg, ObservationGroupCfg, ObservationTermCfg, RandomizationTermCfg, RewardTermCfg, TerminationTermCfg, ) from .observation_manager import ObservationManager from .reward_manager import RewardManager from .scene_entity_cfg import SceneEntityCfg from .termination_manager import TerminationManager
1,188
Python
33.970587
108
0.808081
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/event_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Event manager for orchestrating operations based on different simulation events.""" from __future__ import annotations import torch import warnings from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING import carb from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import EventTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv class EventManager(ManagerBase): """Manager for orchestrating operations based on different simulation events. The event manager applies operations to the environment based on different simulation events. For example, changing the masses of objects or their friction coefficients during initialization/ reset, or applying random pushes to the robot at a fixed interval of steps. The user can specify several modes of events to fine-tune the behavior based on when to apply the event. The event terms are parsed from a config class containing the manager's settings and each term's parameters. Each event term should instantiate the :class:`EventTermCfg` class. Event terms can be grouped by their mode. The mode is a user-defined string that specifies when the event term should be applied. This provides the user complete control over when event terms should be applied. For a typical training process, you may want to apply events in the following modes: - "startup": Event is applied once at the beginning of the training. - "reset": Event is applied at every reset. - "interval": Event is applied at pre-specified intervals of time. However, you can also define your own modes and use them in the training process as you see fit. For this you will need to add the triggering of that mode in the environment implementation as well. .. note:: The triggering of operations corresponding to the mode ``"interval"`` are the only mode that are directly handled by the manager itself. The other modes are handled by the environment implementation. """ _env: RLTaskEnv """The environment instance.""" def __init__(self, cfg: object, env: RLTaskEnv): """Initialize the event manager. Args: cfg: A configuration object or dictionary (``dict[str, EventTermCfg]``). env: An environment object. """ super().__init__(cfg, env) def __str__(self) -> str: """Returns: A string representation for event manager.""" msg = f"<EventManager> contains {len(self._mode_term_names)} active terms.\n" # add info on each mode for mode in self._mode_term_names: # create table for term information table = PrettyTable() table.title = f"Active Event Terms in Mode: '{mode}'" # add table headers based on mode if mode == "interval": table.field_names = ["Index", "Name", "Interval time range (s)"] table.align["Name"] = "l" for index, (name, cfg) in enumerate(zip(self._mode_term_names[mode], self._mode_term_cfgs[mode])): table.add_row([index, name, cfg.interval_range_s]) else: table.field_names = ["Index", "Name"] table.align["Name"] = "l" for index, name in enumerate(self._mode_term_names[mode]): table.add_row([index, name]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def active_terms(self) -> dict[str, list[str]]: """Name of active event terms. The keys are the modes of event and the values are the names of the event terms. 
""" return self._mode_term_names @property def available_modes(self) -> list[str]: """Modes of events.""" return list(self._mode_term_names.keys()) """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: # call all terms that are classes for mode_cfg in self._mode_class_term_cfgs.values(): for term_cfg in mode_cfg: term_cfg.func.reset(env_ids=env_ids) # nothing to log here return {} def apply(self, mode: str, env_ids: Sequence[int] | None = None, dt: float | None = None): """Calls each event term in the specified mode. Note: For interval mode, the time step of the environment is used to determine if the event should be applied. Args: mode: The mode of event. env_ids: The indices of the environments to apply the event to. Defaults to None, in which case the event is applied to all environments. dt: The time step of the environment. This is only used for the "interval" mode. Defaults to None to simplify the call for other modes. Raises: ValueError: If the mode is ``"interval"`` and the time step is not provided. """ # check if mode is valid if mode not in self._mode_term_names: carb.log_warn(f"Event mode '{mode}' is not defined. Skipping event.") return # iterate over all the event terms for index, term_cfg in enumerate(self._mode_term_cfgs[mode]): # resample interval if needed if mode == "interval": if dt is None: raise ValueError( f"Event mode '{mode}' requires the time step of the environment" " to be passed to the event manager." ) # extract time left for this term time_left = self._interval_mode_time_left[index] # update the time left for each environment time_left -= dt # check if the interval has passed env_ids = (time_left <= 0.0).nonzero().flatten() if len(env_ids) > 0: lower, upper = term_cfg.interval_range_s time_left[env_ids] = torch.rand(len(env_ids), device=self.device) * (upper - lower) + lower # call the event term term_cfg.func(self._env, env_ids, **term_cfg.params) """ Operations - Term settings. """ def set_term_cfg(self, term_name: str, cfg: EventTermCfg): """Sets the configuration of the specified term into the manager. The method finds the term by name by searching through all the modes. It then updates the configuration of the term with the first matching name. Args: term_name: The name of the event term. cfg: The configuration for the event term. Raises: ValueError: If the term name is not found. """ term_found = False for mode, terms in self._mode_term_names.items(): if term_name in terms: self._mode_term_cfgs[mode][terms.index(term_name)] = cfg term_found = True break if not term_found: raise ValueError(f"Event term '{term_name}' not found.") def get_term_cfg(self, term_name: str) -> EventTermCfg: """Gets the configuration for the specified term. The method finds the term by name by searching through all the modes. It then returns the configuration of the term with the first matching name. Args: term_name: The name of the event term. Returns: The configuration of the event term. Raises: ValueError: If the term name is not found. """ for mode, terms in self._mode_term_names.items(): if term_name in terms: return self._mode_term_cfgs[mode][terms.index(term_name)] raise ValueError(f"Event term '{term_name}' not found.") """ Helper functions. 
""" def _prepare_terms(self): """Prepares a list of event functions.""" # parse remaining event terms and decimate their information self._mode_term_names: dict[str, list[str]] = dict() self._mode_term_cfgs: dict[str, list[EventTermCfg]] = dict() self._mode_class_term_cfgs: dict[str, list[EventTermCfg]] = dict() # buffer to store the time left for each environment for "interval" mode self._interval_mode_time_left: list[torch.Tensor] = list() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() # iterate over all the terms for term_name, term_cfg in cfg_items: # check for non config if term_cfg is None: continue # check for valid config type if not isinstance(term_cfg, EventTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type EventTermCfg." f" Received: '{type(term_cfg)}'." ) # resolve common parameters self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) # check if mode is a new mode if term_cfg.mode not in self._mode_term_names: # add new mode self._mode_term_names[term_cfg.mode] = list() self._mode_term_cfgs[term_cfg.mode] = list() self._mode_class_term_cfgs[term_cfg.mode] = list() # add term name and parameters self._mode_term_names[term_cfg.mode].append(term_name) self._mode_term_cfgs[term_cfg.mode].append(term_cfg) # check if the term is a class if isinstance(term_cfg.func, ManagerTermBase): self._mode_class_term_cfgs[term_cfg.mode].append(term_cfg) # resolve the mode of the events if term_cfg.mode == "interval": if term_cfg.interval_range_s is None: raise ValueError( f"Event term '{term_name}' has mode 'interval' but 'interval_range_s' is not specified." ) # sample the time left for each environment lower, upper = term_cfg.interval_range_s time_left = torch.rand(self.num_envs, device=self.device) * (upper - lower) + lower self._interval_mode_time_left.append(time_left) class RandomizationManager(EventManager): """Manager for applying event specific operations to different elements in the scene. .. deprecated:: v0.4.0 As the RandomizationManager also handles events such as resetting the environment, the class has been renamed to EventManager as it is more general purpose. The RandomizationManager will be removed in v0.4.0. """ def __init__(self, cfg: object, env: RLTaskEnv): """Initialize the randomization manager. Args: cfg: A configuration object or dictionary (``dict[str, EventTermCfg]``). env: An environment object. """ dep_msg = "The class 'RandomizationManager' will be removed in v0.4.0. Please use 'EventManager' instead." warnings.warn(dep_msg, DeprecationWarning) carb.log_error(dep_msg) super().__init__(cfg, env) def randomize(self, mode: str, env_ids: Sequence[int] | None = None, dt: float | None = None): """Randomize the environment. .. deprecated:: v0.4.0 This method will be removed in v0.4.0. Please use the method :meth:`EventManager.apply` instead. """ dep_msg = ( "The class 'RandomizationManager' including its method 'randomize' will be removed in v0.4.0. Please use " "the class 'EventManager' with the method 'apply' instead." ) warnings.warn(dep_msg, DeprecationWarning) carb.log_error(dep_msg) self.apply(mode, env_ids, dt)
12,265
Python
39.481848
118
0.603261
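Illustrative note (not part of the source files above): for the ``"interval"`` mode, the event manager keeps a per-environment countdown, decrements it by the step time, and resamples it uniformly within ``interval_range_s`` whenever it expires. A standalone sketch of that bookkeeping with placeholder values:

.. code-block:: python

    import torch

    num_envs = 16
    interval_range_s = (2.0, 5.0)  # placeholder for term_cfg.interval_range_s
    dt = 0.02                      # placeholder environment step time
    lower, upper = interval_range_s

    # initial countdown until the event term fires, per environment
    time_left = torch.rand(num_envs) * (upper - lower) + lower

    for _ in range(500):
        # decrement the countdown by the elapsed time
        time_left -= dt
        # environments whose interval has passed
        env_ids = (time_left <= 0.0).nonzero().flatten()
        if len(env_ids) > 0:
            # resample their countdown; the manager would apply the event term to env_ids here
            time_left[env_ids] = torch.rand(len(env_ids)) * (upper - lower) + lower
    print(time_left)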
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/command_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Command manager for generating and updating commands.""" from __future__ import annotations import inspect import torch import weakref from abc import abstractmethod from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING import omni.kit.app from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import CommandTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv class CommandTerm(ManagerTermBase): """The base class for implementing a command term. A command term is used to generate commands for goal-conditioned tasks. For example, in the case of a goal-conditioned navigation task, the command term can be used to generate a target position for the robot to navigate to. It implements a resampling mechanism that allows the command to be resampled at a fixed frequency. The resampling frequency can be specified in the configuration object. Additionally, it is possible to assign a visualization function to the command term that can be used to visualize the command in the simulator. """ def __init__(self, cfg: CommandTermCfg, env: RLTaskEnv): """Initialize the command generator class. Args: cfg: The configuration parameters for the command generator. env: The environment object. """ super().__init__(cfg, env) # create buffers to store the command # -- metrics that can be used for logging self.metrics = dict() # -- time left before resampling self.time_left = torch.zeros(self.num_envs, device=self.device) # -- counter for the number of times the command has been resampled within the current episode self.command_counter = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) self._debug_vis_handle = None # set initial state of debug visualization self.set_debug_vis(self.cfg.debug_vis) def __del__(self): """Unsubscribe from the callbacks.""" if self._debug_vis_handle: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None """ Properties """ @property @abstractmethod def command(self) -> torch.Tensor: """The command tensor. Shape is (num_envs, command_dim).""" raise NotImplementedError @property def has_debug_vis_implementation(self) -> bool: """Whether the command generator has a debug visualization implemented.""" # check if function raises NotImplementedError source_code = inspect.getsource(self._set_debug_vis_impl) return "NotImplementedError" not in source_code """ Operations. """ def set_debug_vis(self, debug_vis: bool) -> bool: """Sets whether to visualize the command data. Args: debug_vis: Whether to visualize the command data. Returns: Whether the debug visualization was successfully set. False if the command generator does not support debug visualization. 
""" # check if debug visualization is supported if not self.has_debug_vis_implementation: return False # toggle debug visualization objects self._set_debug_vis_impl(debug_vis) # toggle debug visualization handles if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: app_interface = omni.kit.app.get_app_interface() self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) ) else: # remove the subscriber if it exists if self._debug_vis_handle is not None: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None # return success return True def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: """Reset the command generator and log metrics. This function resets the command counter and resamples the command. It should be called at the beginning of each episode. Args: env_ids: The list of environment IDs to reset. Defaults to None. Returns: A dictionary containing the information to log under the "{name}" key. """ # resolve the environment IDs if env_ids is None: env_ids = slice(None) # set the command counter to zero self.command_counter[env_ids] = 0 # resample the command self._resample(env_ids) # add logging metrics extras = {} for metric_name, metric_value in self.metrics.items(): # compute the mean metric value extras[metric_name] = torch.mean(metric_value[env_ids]).item() # reset the metric value metric_value[env_ids] = 0.0 return extras def compute(self, dt: float): """Compute the command. Args: dt: The time step passed since the last call to compute. """ # update the metrics based on current state self._update_metrics() # reduce the time left before resampling self.time_left -= dt # resample the command if necessary resample_env_ids = (self.time_left <= 0.0).nonzero().flatten() if len(resample_env_ids) > 0: self._resample(resample_env_ids) # update the command self._update_command() """ Helper functions. """ def _resample(self, env_ids: Sequence[int]): """Resample the command. This function resamples the command and time for which the command is applied for the specified environment indices. Args: env_ids: The list of environment IDs to resample. """ # resample the time left before resampling self.time_left[env_ids] = self.time_left[env_ids].uniform_(*self.cfg.resampling_time_range) # increment the command counter self.command_counter[env_ids] += 1 # resample the command self._resample_command(env_ids) """ Implementation specific functions. """ @abstractmethod def _update_metrics(self): """Update the metrics based on the current state.""" raise NotImplementedError @abstractmethod def _resample_command(self, env_ids: Sequence[int]): """Resample the command for the specified environments.""" raise NotImplementedError @abstractmethod def _update_command(self): """Update the command based on the current state.""" raise NotImplementedError def _set_debug_vis_impl(self, debug_vis: bool): """Set debug visualization into visualization objects. This function is responsible for creating the visualization objects if they don't exist and input ``debug_vis`` is True. If the visualization objects exist, the function should set their visibility into the stage. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") def _debug_vis_callback(self, event): """Callback for debug visualization. 
This function calls the visualization objects and sets the data to visualize into them. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") class CommandManager(ManagerBase): """Manager for generating commands. The command manager is used to generate commands for an agent to execute. It makes it convenient to switch between different command generation strategies within the same environment. For instance, in an environment consisting of a quadrupedal robot, the command to it could be a velocity command or position command. By keeping the command generation logic separate from the environment, it is easy to switch between different command generation strategies. The command terms are implemented as classes that inherit from the :class:`CommandTerm` class. Each command generator term should also have a corresponding configuration class that inherits from the :class:`CommandTermCfg` class. """ _env: RLTaskEnv """The environment instance.""" def __init__(self, cfg: object, env: RLTaskEnv): """Initialize the command manager. Args: cfg: The configuration object or dictionary (``dict[str, CommandTermCfg]``). env: The environment instance. """ super().__init__(cfg, env) # store the commands self._commands = dict() self.cfg.debug_vis = False for term in self._terms.values(): self.cfg.debug_vis |= term.cfg.debug_vis def __str__(self) -> str: """Returns: A string representation for the command manager.""" msg = f"<CommandManager> contains {len(self._terms.values())} active terms.\n" # create table for term information table = PrettyTable() table.title = "Active Command Terms" table.field_names = ["Index", "Name", "Type"] # set alignment of table columns table.align["Name"] = "l" # add info on each term for index, (name, term) in enumerate(self._terms.items()): table.add_row([index, name, term.__class__.__name__]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def active_terms(self) -> list[str]: """Name of active command terms.""" return list(self._terms.keys()) @property def has_debug_vis_implementation(self) -> bool: """Whether the command terms have debug visualization implemented.""" # check if function raises NotImplementedError has_debug_vis = False for term in self._terms.values(): has_debug_vis |= term.has_debug_vis_implementation return has_debug_vis """ Operations. """ def set_debug_vis(self, debug_vis: bool) -> bool: """Sets whether to visualize the command data. Args: debug_vis: Whether to visualize the command data. Returns: Whether the debug visualization was successfully set. False if the command generator does not support debug visualization. """ for term in self._terms.values(): term.set_debug_vis(debug_vis) def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: """Reset the command terms and log their metrics. This function resets the command counter and resamples the command for each term. It should be called at the beginning of each episode. Args: env_ids: The list of environment IDs to reset. Defaults to None. Returns: A dictionary containing the information to log under the "Metrics/{term_name}/{metric_name}" key. 
""" # resolve environment ids if env_ids is None: env_ids = slice(None) # store information extras = {} for name, term in self._terms.items(): # reset the command term metrics = term.reset(env_ids=env_ids) # compute the mean metric value for metric_name, metric_value in metrics.items(): extras[f"Metrics/{name}/{metric_name}"] = metric_value # return logged information return extras def compute(self, dt: float): """Updates the commands. This function calls each command term managed by the class. Args: dt: The time-step interval of the environment. """ # iterate over all the command terms for term in self._terms.values(): # compute term's value term.compute(dt) def get_command(self, name: str) -> torch.Tensor: """Returns the command for the specified command term. Args: name: The name of the command term. Returns: The command tensor of the specified command term. """ return self._terms[name].command def get_term(self, name: str) -> CommandTerm: """Returns the command term with the specified name. Args: name: The name of the command term. Returns: The command term with the specified name. """ return self._terms[name] """ Helper functions. """ def _prepare_terms(self): """Prepares a list of command terms.""" # parse command terms from the config self._terms: dict[str, CommandTerm] = dict() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() # iterate over all the terms for term_name, term_cfg in cfg_items: # check for non config if term_cfg is None: continue # check for valid config type if not isinstance(term_cfg, CommandTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type CommandTermCfg." f" Received: '{type(term_cfg)}'." ) # create the action term term = term_cfg.class_type(term_cfg, self._env) # add class to dict self._terms[term_name] = term
14,055
Python
34.405541
113
0.623764
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/scene_entity_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration terms for different managers.""" from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.assets import Articulation, RigidObject from omni.isaac.orbit.scene import InteractiveScene from omni.isaac.orbit.utils import configclass @configclass class SceneEntityCfg: """Configuration for a scene entity that is used by the manager's term. This class is used to specify the name of the scene entity that is queried from the :class:`InteractiveScene` and passed to the manager's term function. """ name: str = MISSING """The name of the scene entity. This is the name defined in the scene configuration file. See the :class:`InteractiveSceneCfg` class for more details. """ joint_names: str | list[str] | None = None """The names of the joints from the scene entity. Defaults to None. The names can be either joint names or a regular expression matching the joint names. These are converted to joint indices on initialization of the manager and passed to the term function as a list of joint indices under :attr:`joint_ids`. """ joint_ids: list[int] | slice = slice(None) """The indices of the joints from the asset required by the term. Defaults to slice(None), which means all the joints in the asset (if present). If :attr:`joint_names` is specified, this is filled in automatically on initialization of the manager. """ body_names: str | list[str] | None = None """The names of the bodies from the asset required by the term. Defaults to None. The names can be either body names or a regular expression matching the body names. These are converted to body indices on initialization of the manager and passed to the term function as a list of body indices under :attr:`body_ids`. """ body_ids: list[int] | slice = slice(None) """The indices of the bodies from the asset required by the term. Defaults to slice(None), which means all the bodies in the asset. If :attr:`body_names` is specified, this is filled in automatically on initialization of the manager. """ preserve_order: bool = False """Whether to preserve indices ordering to match with that in the specified joint or body names. Defaults to False. If False, the ordering of the indices are sorted in ascending order (i.e. the ordering in the entity's joints or bodies). Otherwise, the indices are preserved in the order of the specified joint and body names. For more details, see the :meth:`omni.isaac.orbit.utils.string.resolve_matching_names` function. .. note:: This attribute is only used when :attr:`joint_names` or :attr:`body_names` are specified. """ def resolve(self, scene: InteractiveScene): """Resolves the scene entity and converts the joint and body names to indices. This function examines the scene entity from the :class:`InteractiveScene` and resolves the indices and names of the joints and bodies. It is an expensive operation as it resolves regular expressions and should be called only once. Args: scene: The interactive scene instance. Raises: ValueError: If the scene entity is not found. ValueError: If both ``joint_names`` and ``joint_ids`` are specified and are not consistent. ValueError: If both ``body_names`` and ``body_ids`` are specified and are not consistent. """ # check if the entity is valid if self.name not in scene.keys(): raise ValueError(f"The scene entity '{self.name}' does not exist. 
Available entities: {scene.keys()}.") # convert joint names to indices based on regex if self.joint_names is not None or self.joint_ids != slice(None): entity: Articulation = scene[self.name] # -- if both are not their default values, check if they are valid if self.joint_names is not None and self.joint_ids != slice(None): if isinstance(self.joint_names, str): self.joint_names = [self.joint_names] if isinstance(self.joint_ids, int): self.joint_ids = [self.joint_ids] joint_ids, _ = entity.find_joints(self.joint_names, preserve_order=self.preserve_order) joint_names = [entity.joint_names[i] for i in self.joint_ids] if joint_ids != self.joint_ids or joint_names != self.joint_names: raise ValueError( "Both 'joint_names' and 'joint_ids' are specified, and are not consistent." f"\n\tfrom joint names: {self.joint_names} [{joint_ids}]" f"\n\tfrom joint ids: {joint_names} [{self.joint_ids}]" "\nHint: Use either 'joint_names' or 'joint_ids' to avoid confusion." ) # -- from joint names to joint indices elif self.joint_names is not None: if isinstance(self.joint_names, str): self.joint_names = [self.joint_names] self.joint_ids, _ = entity.find_joints(self.joint_names, preserve_order=self.preserve_order) # performance optimization (slice offers faster indexing than list of indices) # only all joint in the entity order are selected if len(self.joint_ids) == entity.num_joints and self.joint_names == entity.joint_names: self.joint_ids = slice(None) # -- from joint indices to joint names elif self.joint_ids != slice(None): if isinstance(self.joint_ids, int): self.joint_ids = [self.joint_ids] self.joint_names = [entity.joint_names[i] for i in self.joint_ids] # convert body names to indices based on regex if self.body_names is not None or self.body_ids != slice(None): entity: RigidObject = scene[self.name] # -- if both are not their default values, check if they are valid if self.body_names is not None and self.body_ids != slice(None): if isinstance(self.body_names, str): self.body_names = [self.body_names] if isinstance(self.body_ids, int): self.body_ids = [self.body_ids] body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order) body_names = [entity.body_names[i] for i in self.body_ids] if body_ids != self.body_ids or body_names != self.body_names: raise ValueError( "Both 'body_names' and 'body_ids' are specified, and are not consistent." f"\n\tfrom body names: {self.body_names} [{body_ids}]" f"\n\tfrom body ids: {body_names} [{self.body_ids}]" "\nHint: Use either 'body_names' or 'body_ids' to avoid confusion." ) # -- from body names to body indices elif self.body_names is not None: if isinstance(self.body_names, str): self.body_names = [self.body_names] self.body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order) # performance optimization (slice offers faster indexing than list of indices) # only all bodies in the entity order are selected if len(self.body_ids) == entity.num_bodies and self.body_names == entity.body_names: self.body_ids = slice(None) # -- from body indices to body names elif self.body_ids != slice(None): if isinstance(self.body_ids, int): self.body_ids = [self.body_ids] self.body_names = [entity.body_names[i] for i in self.body_ids]
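
# ---------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows how a manager term
# typically declares and resolves a scene entity. It assumes an :class:`InteractiveScene`
# instance that contains an articulation registered under the name "robot"; the joint-name
# pattern ".*_hip_joint" is a hypothetical example.


def _example_scene_entity_resolution(scene: InteractiveScene) -> SceneEntityCfg:
    """Minimal usage sketch for :class:`SceneEntityCfg` (illustrative only)."""
    # select all joints whose name ends with "_hip_joint" on the entity named "robot"
    asset_cfg = SceneEntityCfg("robot", joint_names=[".*_hip_joint"])
    # convert the regular expressions into indices (expensive; the managers do this once)
    asset_cfg.resolve(scene)
    # after resolution, ``asset_cfg.joint_ids`` holds the matching indices
    # (or ``slice(None)`` if every joint was selected in the entity's own order)
    return asset_cfg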
8,102
Python
48.711656
119
0.62429
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/curriculum_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Curriculum manager for updating environment quantities subject to a training curriculum.""" from __future__ import annotations import torch from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import CurriculumTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv class CurriculumManager(ManagerBase): """Manager to implement and execute specific curricula. The curriculum manager updates various quantities of the environment subject to a training curriculum by calling a list of terms. These help stabilize learning by progressively making the learning tasks harder as the agent improves. The curriculum terms are parsed from a config class containing the manager's settings and each term's parameters. Each curriculum term should instantiate the :class:`CurriculumTermCfg` class. """ _env: RLTaskEnv """The environment instance.""" def __init__(self, cfg: object, env: RLTaskEnv): """Initialize the manager. Args: cfg: The configuration object or dictionary (``dict[str, CurriculumTermCfg]``) env: An environment object. Raises: TypeError: If curriculum term is not of type :class:`CurriculumTermCfg`. ValueError: If curriculum term configuration does not satisfy its function signature. """ super().__init__(cfg, env) # prepare logging self._curriculum_state = dict() for term_name in self._term_names: self._curriculum_state[term_name] = None def __str__(self) -> str: """Returns: A string representation for curriculum manager.""" msg = f"<CurriculumManager> contains {len(self._term_names)} active terms.\n" # create table for term information table = PrettyTable() table.title = "Active Curriculum Terms" table.field_names = ["Index", "Name"] # set alignment of table columns table.align["Name"] = "l" # add info on each term for index, name in enumerate(self._term_names): table.add_row([index, name]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def active_terms(self) -> list[str]: """Name of active curriculum terms.""" return self._term_names """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: """Returns the current state of individual curriculum terms. Note: This function does not use the environment indices :attr:`env_ids` and logs the state of all the terms. The argument is only present to maintain consistency with other classes. Returns: Dictionary of curriculum terms and their states. """ extras = {} for term_name, term_state in self._curriculum_state.items(): if term_state is not None: # deal with dict if isinstance(term_state, dict): # each key is a separate state to log for key, value in term_state.items(): if isinstance(value, torch.Tensor): value = value.item() extras[f"Curriculum/{term_name}/{key}"] = value else: # log directly if not a dict if isinstance(term_state, torch.Tensor): term_state = term_state.item() extras[f"Curriculum/{term_name}"] = term_state # reset all the curriculum terms for term_cfg in self._class_term_cfgs: term_cfg.func.reset(env_ids=env_ids) # return logged information return extras def compute(self, env_ids: Sequence[int] | None = None): """Update the curriculum terms. This function calls each curriculum term managed by the class. Args: env_ids: The list of environment IDs to update. 
If None, all the environments are updated. Defaults to None. """ # resolve environment indices if env_ids is None: env_ids = slice(None) # iterate over all the curriculum terms for name, term_cfg in zip(self._term_names, self._term_cfgs): state = term_cfg.func(self._env, env_ids, **term_cfg.params) self._curriculum_state[name] = state """ Helper functions. """ def _prepare_terms(self): # parse remaining curriculum terms and decimate their information self._term_names: list[str] = list() self._term_cfgs: list[CurriculumTermCfg] = list() self._class_term_cfgs: list[CurriculumTermCfg] = list() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() # iterate over all the terms for term_name, term_cfg in cfg_items: # check for non config if term_cfg is None: continue # check if the term is a valid term config if not isinstance(term_cfg, CurriculumTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type CurriculumTermCfg." f" Received: '{type(term_cfg)}'." ) # resolve common parameters self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) # add name and config to list self._term_names.append(term_name) self._term_cfgs.append(term_cfg) # check if the term is a class if isinstance(term_cfg.func, ManagerTermBase): self._class_term_cfgs.append(term_cfg)
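
# ---------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows the call signature a
# curriculum term function is expected to follow -- ``func(env, env_ids, **params)`` -- and how
# a corresponding configuration entry could look. The function name, parameter, and returned
# value below are hypothetical placeholders.


def _example_curriculum_term(env, env_ids, max_level: int = 10) -> float:
    """A dummy curriculum term returning a scalar state for logging (illustrative only)."""
    # a real term would inspect the environment state and update task difficulty in-place
    return float(max_level)


# inside the environment's curriculum configuration, the term could then be registered as:
#   difficulty = CurriculumTermCfg(func=_example_curriculum_term, params={"max_level": 10})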
6,171
Python
35.738095
108
0.602009
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/manager_term_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration terms for different managers.""" from __future__ import annotations import torch import warnings from collections.abc import Callable from dataclasses import MISSING from typing import TYPE_CHECKING, Any from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.noise import NoiseCfg from .scene_entity_cfg import SceneEntityCfg if TYPE_CHECKING: from .action_manager import ActionTerm from .command_manager import CommandTerm from .manager_base import ManagerTermBase @configclass class ManagerTermBaseCfg: """Configuration for a manager term.""" func: Callable | ManagerTermBase = MISSING """The function or class to be called for the term. The function must take the environment object as the first argument. The remaining arguments are specified in the :attr:`params` attribute. It also supports `callable classes`_, i.e. classes that implement the :meth:`__call__` method. In this case, the class should inherit from the :class:`ManagerTermBase` class and implement the required methods. .. _`callable classes`: https://docs.python.org/3/reference/datamodel.html#object.__call__ """ params: dict[str, Any | SceneEntityCfg] = dict() """The parameters to be passed to the function as keyword arguments. Defaults to an empty dict. .. note:: If the value is a :class:`SceneEntityCfg` object, the manager will query the scene entity from the :class:`InteractiveScene` and process the entity's joints and bodies as specified in the :class:`SceneEntityCfg` object. """ ## # Action manager. ## @configclass class ActionTermCfg: """Configuration for an action term.""" class_type: type[ActionTerm] = MISSING """The associated action term class. The class should inherit from :class:`omni.isaac.orbit.managers.action_manager.ActionTerm`. """ asset_name: str = MISSING """The name of the scene entity. This is the name defined in the scene configuration file. See the :class:`InteractiveSceneCfg` class for more details. """ ## # Command manager. ## @configclass class CommandTermCfg: """Configuration for a command generator term.""" class_type: type[CommandTerm] = MISSING """The associated command term class to use. The class should inherit from :class:`omni.isaac.orbit.managers.command_manager.CommandTerm`. """ resampling_time_range: tuple[float, float] = MISSING """Time before commands are changed [s].""" debug_vis: bool = False """Whether to visualize debug information. Defaults to False.""" ## # Curriculum manager. ## @configclass class CurriculumTermCfg(ManagerTermBaseCfg): """Configuration for a curriculum term.""" func: Callable[..., float | dict[str, float] | None] = MISSING """The name of the function to be called. This function should take the environment object, environment indices and any other parameters as input and return the curriculum state for logging purposes. If the function returns None, the curriculum state is not logged. """ ## # Observation manager. ## @configclass class ObservationTermCfg(ManagerTermBaseCfg): """Configuration for an observation term.""" func: Callable[..., torch.Tensor] = MISSING """The name of the function to be called. This function should take the environment object and any other parameters as input and return the observation signal as torch float tensors of shape (num_envs, obs_term_dim). """ noise: NoiseCfg | None = None """The noise to add to the observation. 
Defaults to None, in which case no noise is added.""" clip: tuple[float, float] | None = None """The clipping range for the observation after adding noise. Defaults to None, in which case no clipping is applied.""" scale: float | None = None """The scale to apply to the observation after clipping. Defaults to None, in which case no scaling is applied (same as setting scale to :obj:`1`).""" @configclass class ObservationGroupCfg: """Configuration for an observation group.""" concatenate_terms: bool = True """Whether to concatenate the observation terms in the group. Defaults to True. If true, the observation terms in the group are concatenated along the last dimension. Otherwise, they are kept separate and returned as a dictionary. """ enable_corruption: bool = False """Whether to enable corruption for the observation group. Defaults to False. If true, the observation terms in the group are corrupted by adding noise (if specified). Otherwise, no corruption is applied. """ ## # Event manager ## @configclass class EventTermCfg(ManagerTermBaseCfg): """Configuration for a event term.""" func: Callable[..., None] = MISSING """The name of the function to be called. This function should take the environment object, environment indices and any other parameters as input. """ mode: str = MISSING """The mode in which the event term is applied. Note: The mode name ``"interval"`` is a special mode that is handled by the manager Hence, its name is reserved and cannot be used for other modes. """ interval_range_s: tuple[float, float] | None = None """The range of time in seconds at which the term is applied. Based on this, the interval is sampled uniformly between the specified range for each environment instance. The term is applied on the environment instances where the current time hits the interval time. Note: This is only used if the mode is ``"interval"``. """ @configclass class RandomizationTermCfg(EventTermCfg): """Configuration for a randomization term. .. deprecated:: v0.3.0 This class is deprecated and will be removed in v0.4.0. Please use :class:`EventTermCfg` instead. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Deprecation warning. warnings.warn( "The RandomizationTermCfg has been renamed to EventTermCfg and will be removed in v0.4.0. Please use" " EventTermCfg instead.", DeprecationWarning, ) ## # Reward manager. ## @configclass class RewardTermCfg(ManagerTermBaseCfg): """Configuration for a reward term.""" func: Callable[..., torch.Tensor] = MISSING """The name of the function to be called. This function should take the environment object and any other parameters as input and return the reward signals as torch float tensors of shape (num_envs,). """ weight: float = MISSING """The weight of the reward term. This is multiplied with the reward term's value to compute the final reward. Note: If the weight is zero, the reward term is ignored. """ ## # Termination manager. ## @configclass class TerminationTermCfg(ManagerTermBaseCfg): """Configuration for a termination term.""" func: Callable[..., torch.Tensor] = MISSING """The name of the function to be called. This function should take the environment object and any other parameters as input and return the termination signals as torch boolean tensors of shape (num_envs,). """ time_out: bool = False """Whether the termination term contributes towards episodic timeouts. Defaults to False. Note: These usually correspond to tasks that have a fixed time limit. """
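
# ---------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows how a task
# configuration typically fills in one of these term configurations. The reward function
# ``_dummy_alive_reward`` and the scene entity name "robot" are hypothetical placeholders; only
# the ``RewardTermCfg`` fields used below are defined by this module.


def _dummy_alive_reward(env, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """A dummy reward term returning one value per environment (illustrative only)."""
    return torch.ones(env.num_envs, device=env.device)


_example_reward_term = RewardTermCfg(
    func=_dummy_alive_reward,
    weight=1.0,
    params={"asset_cfg": SceneEntityCfg("robot")},
)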
7,667
Python
27.295203
113
0.696361
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/reward_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Reward manager for computing reward signals for a given world.""" from __future__ import annotations import torch from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import RewardTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv class RewardManager(ManagerBase): """Manager for computing reward signals for a given world. The reward manager computes the total reward as a sum of the weighted reward terms. The reward terms are parsed from a nested config class containing the reward manger's settings and reward terms configuration. The reward terms are parsed from a config class containing the manager's settings and each term's parameters. Each reward term should instantiate the :class:`RewardTermCfg` class. .. note:: The reward manager multiplies the reward term's ``weight`` with the time-step interval ``dt`` of the environment. This is done to ensure that the computed reward terms are balanced with respect to the chosen time-step interval in the environment. """ _env: RLTaskEnv """The environment instance.""" def __init__(self, cfg: object, env: RLTaskEnv): """Initialize the reward manager. Args: cfg: The configuration object or dictionary (``dict[str, RewardTermCfg]``). env: The environment instance. """ super().__init__(cfg, env) # prepare extra info to store individual reward term information self._episode_sums = dict() for term_name in self._term_names: self._episode_sums[term_name] = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) # create buffer for managing reward per environment self._reward_buf = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) def __str__(self) -> str: """Returns: A string representation for reward manager.""" msg = f"<RewardManager> contains {len(self._term_names)} active terms.\n" # create table for term information table = PrettyTable() table.title = "Active Reward Terms" table.field_names = ["Index", "Name", "Weight"] # set alignment of table columns table.align["Name"] = "l" table.align["Weight"] = "r" # add info on each term for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)): table.add_row([index, name, term_cfg.weight]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def active_terms(self) -> list[str]: """Name of active reward terms.""" return self._term_names """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: """Returns the episodic sum of individual reward terms. Args: env_ids: The environment ids for which the episodic sum of individual reward terms is to be returned. Defaults to all the environment ids. Returns: Dictionary of episodic sum of individual reward terms. """ # resolve environment ids if env_ids is None: env_ids = slice(None) # store information extras = {} for key in self._episode_sums.keys(): # store information # r_1 + r_2 + ... 
+ r_n episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids]) extras["Episode Reward/" + key] = episodic_sum_avg / self._env.max_episode_length_s # reset episodic sum self._episode_sums[key][env_ids] = 0.0 # reset all the reward terms for term_cfg in self._class_term_cfgs: term_cfg.func.reset(env_ids=env_ids) # return logged information return extras def compute(self, dt: float) -> torch.Tensor: """Computes the reward signal as a weighted sum of individual terms. This function calls each reward term managed by the class and adds them to compute the net reward signal. It also updates the episodic sums corresponding to individual reward terms. Args: dt: The time-step interval of the environment. Returns: The net reward signal of shape (num_envs,). """ # reset computation self._reward_buf[:] = 0.0 # iterate over all the reward terms for name, term_cfg in zip(self._term_names, self._term_cfgs): # skip if weight is zero (kind of a micro-optimization) if term_cfg.weight == 0.0: continue # compute term's value value = term_cfg.func(self._env, **term_cfg.params) * term_cfg.weight * dt # update total reward self._reward_buf += value # update episodic sum self._episode_sums[name] += value return self._reward_buf """ Operations - Term settings. """ def set_term_cfg(self, term_name: str, cfg: RewardTermCfg): """Sets the configuration of the specified term into the manager. Args: term_name: The name of the reward term. cfg: The configuration for the reward term. Raises: ValueError: If the term name is not found. """ if term_name not in self._term_names: raise ValueError(f"Reward term '{term_name}' not found.") # set the configuration self._term_cfgs[self._term_names.index(term_name)] = cfg def get_term_cfg(self, term_name: str) -> RewardTermCfg: """Gets the configuration for the specified term. Args: term_name: The name of the reward term. Returns: The configuration of the reward term. Raises: ValueError: If the term name is not found. """ if term_name not in self._term_names: raise ValueError(f"Reward term '{term_name}' not found.") # return the configuration return self._term_cfgs[self._term_names.index(term_name)] """ Helper functions. """ def _prepare_terms(self): """Prepares a list of reward functions.""" # parse remaining reward terms and decimate their information self._term_names: list[str] = list() self._term_cfgs: list[RewardTermCfg] = list() self._class_term_cfgs: list[RewardTermCfg] = list() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() # iterate over all the terms for term_name, term_cfg in cfg_items: # check for non config if term_cfg is None: continue # check for valid config type if not isinstance(term_cfg, RewardTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type RewardTermCfg." f" Received: '{type(term_cfg)}'." ) # check for valid weight type if not isinstance(term_cfg.weight, (float, int)): raise TypeError( f"Weight for the term '{term_name}' is not of type float or int." f" Received: '{type(term_cfg.weight)}'." ) # resolve common parameters self._resolve_common_term_cfg(term_name, term_cfg, min_argc=1) # add function to list self._term_names.append(term_name) self._term_cfgs.append(term_cfg) # check if the term is a class if isinstance(term_cfg.func, ManagerTermBase): self._class_term_cfgs.append(term_cfg)
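
# ---------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows a minimal step/reset
# cycle for the reward manager, assuming an already-constructed ``RLTaskEnv`` whose
# configuration exposes its reward terms under ``env.cfg.rewards`` and whose step size is
# ``env.step_dt``.


def _example_reward_manager_usage(env) -> torch.Tensor:
    """Minimal usage sketch for :class:`RewardManager` (illustrative only)."""
    reward_manager = RewardManager(cfg=env.cfg.rewards, env=env)
    # weighted sum of all terms, scaled by the environment time-step; shape is (num_envs,)
    reward = reward_manager.compute(dt=env.step_dt)
    # per-term episodic sums are returned (and cleared) when episodes reset
    _ = reward_manager.reset(env_ids=None)
    return reward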
8,148
Python
35.379464
109
0.602111
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/action_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Action manager for processing actions sent to the environment.""" from __future__ import annotations import torch from abc import abstractmethod from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING from omni.isaac.orbit.assets import AssetBase from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import ActionTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv class ActionTerm(ManagerTermBase): """Base class for action terms. The action term is responsible for processing the raw actions sent to the environment and applying them to the asset managed by the term. The action term is comprised of two operations: * Processing of actions: This operation is performed once per **environment step** and is responsible for pre-processing the raw actions sent to the environment. * Applying actions: This operation is performed once per **simulation step** and is responsible for applying the processed actions to the asset managed by the term. """ def __init__(self, cfg: ActionTermCfg, env: BaseEnv): """Initialize the action term. Args: cfg: The configuration object. env: The environment instance. """ # call the base class constructor super().__init__(cfg, env) # parse config to obtain asset to which the term is applied self._asset: AssetBase = self._env.scene[self.cfg.asset_name] """ Properties. """ @property @abstractmethod def action_dim(self) -> int: """Dimension of the action term.""" raise NotImplementedError @property @abstractmethod def raw_actions(self) -> torch.Tensor: """The input/raw actions sent to the term.""" raise NotImplementedError @property @abstractmethod def processed_actions(self) -> torch.Tensor: """The actions computed by the term after applying any processing.""" raise NotImplementedError """ Operations. """ @abstractmethod def process_actions(self, actions: torch.Tensor): """Processes the actions sent to the environment. Note: This function is called once per environment step by the manager. Args: actions: The actions to process. """ raise NotImplementedError @abstractmethod def apply_actions(self): """Applies the actions to the asset managed by the term. Note: This is called at every simulation step by the manager. """ raise NotImplementedError class ActionManager(ManagerBase): """Manager for processing and applying actions for a given world. The action manager handles the interpretation and application of user-defined actions on a given world. It is comprised of different action terms that decide the dimension of the expected actions. The action manager performs operations at two stages: * processing of actions: It splits the input actions to each term and performs any pre-processing needed. This should be called once at every environment step. * apply actions: This operation typically sets the processed actions into the assets in the scene (such as robots). It should be called before every simulation step. """ def __init__(self, cfg: object, env: BaseEnv): """Initialize the action manager. Args: cfg: The configuration object or dictionary (``dict[str, ActionTermCfg]``). env: The environment instance. 
""" super().__init__(cfg, env) # create buffers to store actions self._action = torch.zeros((self.num_envs, self.total_action_dim), device=self.device) self._prev_action = torch.zeros_like(self._action) def __str__(self) -> str: """Returns: A string representation for action manager.""" msg = f"<ActionManager> contains {len(self._term_names)} active terms.\n" # create table for term information table = PrettyTable() table.title = f"Active Action Terms (shape: {self.total_action_dim})" table.field_names = ["Index", "Name", "Dimension"] # set alignment of table columns table.align["Name"] = "l" table.align["Dimension"] = "r" # add info on each term for index, (name, term) in enumerate(self._terms.items()): table.add_row([index, name, term.action_dim]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. """ @property def total_action_dim(self) -> int: """Total dimension of actions.""" return sum(self.action_term_dim) @property def active_terms(self) -> list[str]: """Name of active action terms.""" return self._term_names @property def action_term_dim(self) -> list[int]: """Shape of each action term.""" return [term.action_dim for term in self._terms.values()] @property def action(self) -> torch.Tensor: """The actions sent to the environment. Shape is (num_envs, total_action_dim).""" return self._action @property def prev_action(self) -> torch.Tensor: """The previous actions sent to the environment. Shape is (num_envs, total_action_dim).""" return self._prev_action """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: """Resets the action history. Args: env_ids: The environment ids. Defaults to None, in which case all environments are considered. Returns: An empty dictionary. """ # resolve environment ids if env_ids is None: env_ids = slice(None) # reset the action history self._prev_action[env_ids] = 0.0 self._action[env_ids] = 0.0 # reset all action terms for term in self._terms.values(): term.reset(env_ids=env_ids) # nothing to log here return {} def process_action(self, action: torch.Tensor): """Processes the actions sent to the environment. Note: This function should be called once per environment step. Args: action: The actions to process. """ # check if action dimension is valid if self.total_action_dim != action.shape[1]: raise ValueError(f"Invalid action shape, expected: {self.total_action_dim}, received: {action.shape[1]}.") # store the input actions self._prev_action[:] = self._action self._action[:] = action.to(self.device) # split the actions and apply to each tensor idx = 0 for term in self._terms.values(): term_actions = action[:, idx : idx + term.action_dim] term.process_actions(term_actions) idx += term.action_dim def apply_action(self) -> None: """Applies the actions to the environment/simulation. Note: This should be called at every simulation step. """ for term in self._terms.values(): term.apply_actions() def get_term(self, name: str) -> ActionTerm: """Returns the action term with the specified name. Args: name: The name of the action term. Returns: The action term with the specified name. """ return self._terms[name] """ Helper functions. 
""" def _prepare_terms(self): """Prepares a list of action terms.""" # parse action terms from the config self._term_names: list[str] = list() self._terms: dict[str, ActionTerm] = dict() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() for term_name, term_cfg in cfg_items: # check if term config is None if term_cfg is None: continue # check valid type if not isinstance(term_cfg, ActionTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type ActionTermCfg." f" Received: '{type(term_cfg)}'." ) # create the action term term = term_cfg.class_type(term_cfg, self._env) # sanity check if term is valid type if not isinstance(term, ActionTerm): raise TypeError(f"Returned object for the term '{term_name}' is not of type ActionType.") # add term name and parameters self._term_names.append(term_name) self._terms[term_name] = term
9,069
Python
31.862319
118
0.614732
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/termination_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Termination manager for computing done signals for a given world.""" from __future__ import annotations import torch from collections.abc import Sequence from prettytable import PrettyTable from typing import TYPE_CHECKING from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import TerminationTermCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv class TerminationManager(ManagerBase): """Manager for computing done signals for a given world. The termination manager computes the termination signal (also called dones) as a combination of termination terms. Each termination term is a function which takes the environment as an argument and returns a boolean tensor of shape (num_envs,). The termination manager computes the termination signal as the union (logical or) of all the termination terms. Following the `Gymnasium API <https://gymnasium.farama.org/tutorials/gymnasium_basics/handling_time_limits/>`_, the termination signal is computed as the logical OR of the following signals: * **Time-out**: This signal is set to true if the environment has ended after an externally defined condition (that is outside the scope of a MDP). For example, the environment may be terminated if the episode has timed out (i.e. reached max episode length). * **Terminated**: This signal is set to true if the environment has reached a terminal state defined by the environment. This state may correspond to task success, task failure, robot falling, etc. These signals can be individually accessed using the :attr:`time_outs` and :attr:`terminated` properties. The termination terms are parsed from a config class containing the manager's settings and each term's parameters. Each termination term should instantiate the :class:`TerminationTermCfg` class. The term's configuration :attr:`TerminationTermCfg.time_out` decides whether the term is a timeout or a termination term. """ _env: RLTaskEnv """The environment instance.""" def __init__(self, cfg: object, env: RLTaskEnv): """Initializes the termination manager. Args: cfg: The configuration object or dictionary (``dict[str, TerminationTermCfg]``). env: An environment object. """ super().__init__(cfg, env) # prepare extra info to store individual termination term information self._term_dones = dict() for term_name in self._term_names: self._term_dones[term_name] = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool) # create buffer for managing termination per environment self._truncated_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool) self._terminated_buf = torch.zeros_like(self._truncated_buf) def __str__(self) -> str: """Returns: A string representation for termination manager.""" msg = f"<TerminationManager> contains {len(self._term_names)} active terms.\n" # create table for term information table = PrettyTable() table.title = "Active Termination Terms" table.field_names = ["Index", "Name", "Time Out"] # set alignment of table columns table.align["Name"] = "l" # add info on each term for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)): table.add_row([index, name, term_cfg.time_out]) # convert table to string msg += table.get_string() msg += "\n" return msg """ Properties. 
""" @property def active_terms(self) -> list[str]: """Name of active termination terms.""" return self._term_names @property def dones(self) -> torch.Tensor: """The net termination signal. Shape is (num_envs,).""" return self._truncated_buf | self._terminated_buf @property def time_outs(self) -> torch.Tensor: """The timeout signal (reaching max episode length). Shape is (num_envs,). This signal is set to true if the environment has ended after an externally defined condition (that is outside the scope of a MDP). For example, the environment may be terminated if the episode has timed out (i.e. reached max episode length). """ return self._truncated_buf @property def terminated(self) -> torch.Tensor: """The terminated signal (reaching a terminal state). Shape is (num_envs,). This signal is set to true if the environment has reached a terminal state defined by the environment. This state may correspond to task success, task failure, robot falling, etc. """ return self._terminated_buf """ Operations. """ def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: """Returns the episodic counts of individual termination terms. Args: env_ids: The environment ids. Defaults to None, in which case all environments are considered. Returns: Dictionary of episodic sum of individual reward terms. """ # resolve environment ids if env_ids is None: env_ids = slice(None) # add to episode dict extras = {} for key in self._term_dones.keys(): # store information extras["Episode Termination/" + key] = torch.count_nonzero(self._term_dones[key][env_ids]).item() # reset all the reward terms for term_cfg in self._class_term_cfgs: term_cfg.func.reset(env_ids=env_ids) # return logged information return extras def compute(self) -> torch.Tensor: """Computes the termination signal as union of individual terms. This function calls each termination term managed by the class and performs a logical OR operation to compute the net termination signal. Returns: The combined termination signal of shape (num_envs,). """ # reset computation self._truncated_buf[:] = False self._terminated_buf[:] = False # iterate over all the termination terms for name, term_cfg in zip(self._term_names, self._term_cfgs): value = term_cfg.func(self._env, **term_cfg.params) # store timeout signal separately if term_cfg.time_out: self._truncated_buf |= value else: self._terminated_buf |= value # add to episode dones self._term_dones[name][:] = value # return combined termination signal return self._truncated_buf | self._terminated_buf def get_term(self, name: str) -> torch.Tensor: """Returns the termination term with the specified name. Args: name: The name of the termination term. Returns: The corresponding termination term value. Shape is (num_envs,). """ return self._term_dones[name] """ Operations - Term settings. """ def set_term_cfg(self, term_name: str, cfg: TerminationTermCfg): """Sets the configuration of the specified term into the manager. Args: term_name: The name of the termination term. cfg: The configuration for the termination term. Raises: ValueError: If the term name is not found. """ if term_name not in self._term_names: raise ValueError(f"Termination term '{term_name}' not found.") # set the configuration self._term_cfgs[self._term_names.index(term_name)] = cfg def get_term_cfg(self, term_name: str) -> TerminationTermCfg: """Gets the configuration for the specified term. Args: term_name: The name of the termination term. Returns: The configuration of the termination term. 
Raises: ValueError: If the term name is not found. """ if term_name not in self._term_names: raise ValueError(f"Termination term '{term_name}' not found.") # return the configuration return self._term_cfgs[self._term_names.index(term_name)] """ Helper functions. """ def _prepare_terms(self): """Prepares a list of termination functions.""" # parse remaining termination terms and decimate their information self._term_names: list[str] = list() self._term_cfgs: list[TerminationTermCfg] = list() self._class_term_cfgs: list[TerminationTermCfg] = list() # check if config is dict already if isinstance(self.cfg, dict): cfg_items = self.cfg.items() else: cfg_items = self.cfg.__dict__.items() # iterate over all the terms for term_name, term_cfg in cfg_items: # check for non config if term_cfg is None: continue # check for valid config type if not isinstance(term_cfg, TerminationTermCfg): raise TypeError( f"Configuration for the term '{term_name}' is not of type TerminationTermCfg." f" Received: '{type(term_cfg)}'." ) # resolve common parameters self._resolve_common_term_cfg(term_name, term_cfg, min_argc=1) # add function to list self._term_names.append(term_name) self._term_cfgs.append(term_cfg) # check if the term is a class if isinstance(term_cfg.func, ManagerTermBase): self._class_term_cfgs.append(term_cfg)
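
# ---------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows how an environment
# typically splits the manager's output into the Gymnasium-style (terminated, truncated) pair,
# assuming an already-constructed ``RLTaskEnv`` whose configuration exposes its termination
# terms under ``env.cfg.terminations``.


def _example_termination_manager_usage(env) -> tuple[torch.Tensor, torch.Tensor]:
    """Minimal usage sketch for :class:`TerminationManager` (illustrative only)."""
    termination_manager = TerminationManager(cfg=env.cfg.terminations, env=env)
    # evaluate all terms once per environment step (logical OR of all signals)
    _ = termination_manager.compute()
    # terminal MDP states (task success/failure, robot falling, ...)
    terminated = termination_manager.terminated
    # externally imposed time-outs (e.g. reaching the maximum episode length)
    truncated = termination_manager.time_outs
    return terminated, truncated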
9,845
Python
38.071428
115
0.634027
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/rmp_flow.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from dataclasses import MISSING import omni.isaac.core.utils.prims as prim_utils from omni.isaac.core.articulations import Articulation from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.motion_generation import ArticulationMotionPolicy from omni.isaac.motion_generation.lula.motion_policies import RmpFlow, RmpFlowSmoothed from omni.isaac.orbit.utils import configclass @configclass class RmpFlowControllerCfg: """Configuration for RMP-Flow controller (provided through LULA library).""" name: str = "rmp_flow" """Name of the controller. Supported: "rmp_flow", "rmp_flow_smoothed". Defaults to "rmp_flow".""" config_file: str = MISSING """Path to the configuration file for the controller.""" urdf_file: str = MISSING """Path to the URDF model of the robot.""" collision_file: str = MISSING """Path to collision model description of the robot.""" frame_name: str = MISSING """Name of the robot frame for task space (must be present in the URDF).""" evaluations_per_frame: float = MISSING """Number of substeps during Euler integration inside LULA world model.""" ignore_robot_state_updates: bool = False """If true, then state of the world model inside controller is rolled out. Defaults to False.""" class RmpFlowController: """Wraps around RMPFlow from IsaacSim for batched environments.""" def __init__(self, cfg: RmpFlowControllerCfg, device: str): """Initialize the controller. Args: cfg: The configuration for the controller. device: The device to use for computation. """ # store input self.cfg = cfg self._device = device # display info print(f"[INFO]: Loading RMPFlow controller URDF from: {self.cfg.urdf_file}") """ Properties. """ @property def num_actions(self) -> int: """Dimension of the action space of controller.""" return 7 """ Operations. """ def initialize(self, prim_paths_expr: str): """Initialize the controller. Args: prim_paths_expr: The expression to find the articulation prim paths. 
""" # obtain the simulation time physics_dt = SimulationContext.instance().get_physics_dt() # find all prims self._prim_paths = prim_utils.find_matching_prim_paths(prim_paths_expr) self.num_robots = len(self._prim_paths) # resolve controller if self.cfg.name == "rmp_flow": controller_cls = RmpFlow elif self.cfg.name == "rmp_flow_smoothed": controller_cls = RmpFlowSmoothed else: raise ValueError(f"Unsupported controller in Lula library: {self.cfg.name}") # create all franka robots references and their controllers self.articulation_policies = list() for prim_path in self._prim_paths: # add robot reference robot = Articulation(prim_path) robot.initialize() # add controller rmpflow = controller_cls( robot_description_path=self.cfg.collision_file, urdf_path=self.cfg.urdf_file, rmpflow_config_path=self.cfg.config_file, end_effector_frame_name=self.cfg.frame_name, maximum_substep_size=physics_dt / self.cfg.evaluations_per_frame, ignore_robot_state_updates=self.cfg.ignore_robot_state_updates, ) # wrap rmpflow to connect to the Franka robot articulation articulation_policy = ArticulationMotionPolicy(robot, rmpflow, physics_dt) self.articulation_policies.append(articulation_policy) # get number of active joints self.active_dof_names = self.articulation_policies[0].get_motion_policy().get_active_joints() self.num_dof = len(self.active_dof_names) # create buffers # -- for storing command self._command = torch.zeros(self.num_robots, self.num_actions, device=self._device) # -- for policy output self.dof_pos_target = torch.zeros((self.num_robots, self.num_dof), device=self._device) self.dof_vel_target = torch.zeros((self.num_robots, self.num_dof), device=self._device) def reset_idx(self, robot_ids: torch.Tensor = None): """Reset the internals.""" # if no robot ids are provided, then reset all robots if robot_ids is None: robot_ids = torch.arange(self.num_robots, device=self._device) # reset policies for specified robots for index in robot_ids: self.articulation_policies[index].motion_policy.reset() def set_command(self, command: torch.Tensor): """Set target end-effector pose command.""" # store command self._command[:] = command def compute(self) -> tuple[torch.Tensor, torch.Tensor]: """Performs inference with the controller. Returns: The target joint positions and velocity commands. """ # convert command to numpy command = self._command.cpu().numpy() # compute control actions for i, policy in enumerate(self.articulation_policies): # enable type-hinting policy: ArticulationMotionPolicy # set rmpflow target to be the current position of the target cube. policy.get_motion_policy().set_end_effector_target( target_position=command[i, 0:3], target_orientation=command[i, 3:7] ) # apply action on the robot action = policy.get_next_articulation_action() # copy actions into buffer self.dof_pos_target[i, :] = torch.from_numpy(action.joint_positions[:]).to(self.dof_pos_target) self.dof_vel_target[i, :] = torch.from_numpy(action.joint_velocities[:]).to(self.dof_vel_target) return self.dof_pos_target, self.dof_vel_target
6,146
Python
39.440789
108
0.643345
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-package for different controllers and motion-generators. Controllers or motion generators are responsible for closed-loop tracking of a given command. The controller can be a simple PID controller or a more complex controller such as impedance control or inverse kinematics control. The controller is responsible for generating the desired joint-level commands to be sent to the robot. """ from .differential_ik import DifferentialIKController from .differential_ik_cfg import DifferentialIKControllerCfg
637
Python
38.874998
99
0.814757
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/operational_space.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from dataclasses import MISSING from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.math import apply_delta_pose, compute_pose_error @configclass class OperationSpaceControllerCfg: """Configuration for operation-space controller.""" command_types: Sequence[str] = MISSING """Type of command. It has two sub-strings joined by underscore: - type of command mode: "position", "pose", "force" - type of command resolving: "abs" (absolute), "rel" (relative) """ impedance_mode: str = MISSING """Type of gains for motion control: "fixed", "variable", "variable_kp".""" uncouple_motion_wrench: bool = False """Whether to decouple the wrench computation from task-space pose (motion) error.""" motion_control_axes: Sequence[int] = (1, 1, 1, 1, 1, 1) """Motion direction to control. Mark as 0/1 for each axis.""" force_control_axes: Sequence[int] = (0, 0, 0, 0, 0, 0) """Force direction to control. Mark as 0/1 for each axis.""" inertial_compensation: bool = False """Whether to perform inertial compensation for motion control (inverse dynamics).""" gravity_compensation: bool = False """Whether to perform gravity compensation.""" stiffness: float | Sequence[float] = MISSING """The positional gain for determining wrenches based on task-space pose error.""" damping_ratio: float | Sequence[float] | None = None """The damping ratio is used in-conjunction with positional gain to compute wrenches based on task-space velocity error. The following math operation is performed for computing velocity gains: :math:`d_gains = 2 * sqrt(p_gains) * damping_ratio`. """ stiffness_limits: tuple[float, float] = (0, 300) """Minimum and maximum values for positional gains. Note: Used only when :obj:`impedance_mode` is "variable" or "variable_kp". """ damping_ratio_limits: tuple[float, float] = (0, 100) """Minimum and maximum values for damping ratios used to compute velocity gains. Note: Used only when :obj:`impedance_mode` is "variable". """ force_stiffness: float | Sequence[float] = None """The positional gain for determining wrenches for closed-loop force control. If obj:`None`, then open-loop control of desired forces is performed. """ position_command_scale: tuple[float, float, float] = (1.0, 1.0, 1.0) """Scaling of the position command received. Used only in relative mode.""" rotation_command_scale: tuple[float, float, float] = (1.0, 1.0, 1.0) """Scaling of the rotation command received. Used only in relative mode.""" class OperationSpaceController: """Operation-space controller. Reference: [1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf """ def __init__(self, cfg: OperationSpaceControllerCfg, num_robots: int, num_dof: int, device: str): """Initialize operation-space controller. Args: cfg: The configuration for operation-space controller. num_robots: The number of robots to control. num_dof: The number of degrees of freedom of the robot. device: The device to use for computations. Raises: ValueError: When invalid control command is provided. 
""" # store inputs self.cfg = cfg self.num_robots = num_robots self.num_dof = num_dof self._device = device # resolve tasks-pace target dimensions self.target_list = list() for command_type in self.cfg.command_types: if "position" in command_type: self.target_list.append(3) elif command_type == "pose_rel": self.target_list.append(6) elif command_type == "pose_abs": self.target_list.append(7) elif command_type == "force_abs": self.target_list.append(6) else: raise ValueError(f"Invalid control command: {command_type}.") self.target_dim = sum(self.target_list) # create buffers # -- selection matrices self._selection_matrix_motion = torch.diag( torch.tensor(self.cfg.motion_control_axes, dtype=torch.float, device=self._device) ) self._selection_matrix_force = torch.diag( torch.tensor(self.cfg.force_control_axes, dtype=torch.float, device=self._device) ) # -- commands self._task_space_target = torch.zeros(self.num_robots, self.target_dim, device=self._device) # -- scaling of command self._position_command_scale = torch.diag(torch.tensor(self.cfg.position_command_scale, device=self._device)) self._rotation_command_scale = torch.diag(torch.tensor(self.cfg.rotation_command_scale, device=self._device)) # -- motion control gains self._p_gains = torch.zeros(self.num_robots, 6, device=self._device) self._p_gains[:] = torch.tensor(self.cfg.stiffness, device=self._device) self._d_gains = 2 * torch.sqrt(self._p_gains) * torch.tensor(self.cfg.damping_ratio, device=self._device) # -- force control gains if self.cfg.force_stiffness is not None: self._p_wrench_gains = torch.zeros(self.num_robots, 6, device=self._device) self._p_wrench_gains[:] = torch.tensor(self.cfg.force_stiffness, device=self._device) else: self._p_wrench_gains = None # -- position gain limits self._p_gains_limits = torch.zeros(self.num_robots, 6, device=self._device) self._p_gains_limits[..., 0], self._p_gains_limits[..., 1] = ( self.cfg.stiffness_limits[0], self.cfg.stiffness_limits[1], ) # -- damping ratio limits self._damping_ratio_limits = torch.zeros_like(self._p_gains_limits) self._damping_ratio_limits[..., 0], self._damping_ratio_limits[..., 1] = ( self.cfg.damping_ratio_limits[0], self.cfg.damping_ratio_limits[1], ) # -- storing outputs self._desired_torques = torch.zeros(self.num_robots, self.num_dof, self.num_dof, device=self._device) """ Properties. """ @property def num_actions(self) -> int: """Dimension of the action space of controller.""" # impedance mode if self.cfg.impedance_mode == "fixed": # task-space pose return self.target_dim elif self.cfg.impedance_mode == "variable_kp": # task-space pose + stiffness return self.target_dim + 6 elif self.cfg.impedance_mode == "variable": # task-space pose + stiffness + damping return self.target_dim + 6 + 6 else: raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.") """ Operations. """ def initialize(self): """Initialize the internals.""" pass def reset_idx(self, robot_ids: torch.Tensor = None): """Reset the internals.""" pass def set_command(self, command: torch.Tensor): """Set target end-effector pose or force command. Args: command: The target end-effector pose or force command. """ # check input size if command.shape != (self.num_robots, self.num_actions): raise ValueError( f"Invalid command shape '{command.shape}'. Expected: '{(self.num_robots, self.num_actions)}'." 
) # impedance mode if self.cfg.impedance_mode == "fixed": # joint positions self._task_space_target[:] = command elif self.cfg.impedance_mode == "variable_kp": # split input command task_space_command, stiffness = torch.tensor_split(command, (self.target_dim, 6), dim=-1) # format command stiffness = stiffness.clip_(min=self._p_gains_limits[0], max=self._p_gains_limits[1]) # joint positions + stiffness self._task_space_target[:] = task_space_command.squeeze(dim=-1) self._p_gains[:] = stiffness self._d_gains[:] = 2 * torch.sqrt(self._p_gains) # critically damped elif self.cfg.impedance_mode == "variable": # split input command task_space_command, stiffness, damping_ratio = torch.tensor_split(command, 3, dim=-1) # format command stiffness = stiffness.clip_(min=self._p_gains_limits[0], max=self._p_gains_limits[1]) damping_ratio = damping_ratio.clip_(min=self._damping_ratio_limits[0], max=self._damping_ratio_limits[1]) # joint positions + stiffness + damping self._task_space_target[:] = task_space_command self._p_gains[:] = stiffness self._d_gains[:] = 2 * torch.sqrt(self._p_gains) * damping_ratio else: raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.") def compute( self, jacobian: torch.Tensor, ee_pose: torch.Tensor | None = None, ee_vel: torch.Tensor | None = None, ee_force: torch.Tensor | None = None, mass_matrix: torch.Tensor | None = None, gravity: torch.Tensor | None = None, ) -> torch.Tensor: """Performs inference with the controller. Args: jacobian: The Jacobian matrix of the end-effector. ee_pose: The current end-effector pose. It is a tensor of shape (num_robots, 7), which contains the position and quaternion (w, x, y, z). Defaults to None. ee_vel: The current end-effector velocity. It is a tensor of shape (num_robots, 6), which contains the linear and angular velocities. Defaults to None. ee_force: The current external force on the end-effector. It is a tensor of shape (num_robots, 3), which contains the linear force. Defaults to None. mass_matrix: The joint-space inertial matrix. Defaults to None. gravity: The joint-space gravity vector. Defaults to None. Raises: ValueError: When the end-effector pose is not provided for the 'position_rel' command. ValueError: When the end-effector pose is not provided for the 'position_abs' command. ValueError: When the end-effector pose is not provided for the 'pose_rel' command. ValueError: When an invalid command type is provided. ValueError: When motion-control is enabled but the end-effector pose or velocity is not provided. ValueError: When force-control is enabled but the end-effector force is not provided. ValueError: When inertial compensation is enabled but the mass matrix is not provided. ValueError: When gravity compensation is enabled but the gravity vector is not provided. Returns: The target joint torques commands. 
""" # buffers for motion/force control desired_ee_pos = None desired_ee_rot = None desired_ee_force = None # resolve the commands target_groups = torch.split(self._task_space_target, self.target_list) for command_type, target in zip(self.cfg.command_types, target_groups): if command_type == "position_rel": # check input is provided if ee_pose is None: raise ValueError("End-effector pose is required for 'position_rel' command.") # scale command target @= self._position_command_scale # compute targets desired_ee_pos = ee_pose[:, :3] + target desired_ee_rot = ee_pose[:, 3:] elif command_type == "position_abs": # check input is provided if ee_pose is None: raise ValueError("End-effector pose is required for 'position_abs' command.") # compute targets desired_ee_pos = target desired_ee_rot = ee_pose[:, 3:] elif command_type == "pose_rel": # check input is provided if ee_pose is None: raise ValueError("End-effector pose is required for 'pose_rel' command.") # scale command target[:, 0:3] @= self._position_command_scale target[:, 3:6] @= self._rotation_command_scale # compute targets desired_ee_pos, desired_ee_rot = apply_delta_pose(ee_pose[:, :3], ee_pose[:, 3:], target) elif command_type == "pose_abs": # compute targets desired_ee_pos = target[:, 0:3] desired_ee_rot = target[:, 3:7] elif command_type == "force_abs": # compute targets desired_ee_force = target else: raise ValueError(f"Invalid control command: {self.cfg.command_type}.") # reset desired joint torques self._desired_torques[:] = 0 # compute for motion-control if desired_ee_pos is not None: # check input is provided if ee_pose is None or ee_vel is None: raise ValueError("End-effector pose and velocity are required for motion control.") # -- end-effector tracking error pose_error = compute_pose_error( ee_pose[:, :3], ee_pose[:, 3:], desired_ee_pos, desired_ee_rot, rot_error_type="axis_angle" ) velocity_error = -ee_vel # zero target velocity # -- desired end-effector acceleration (spring damped system) des_ee_acc = self._p_gains * pose_error + self._d_gains * velocity_error # -- inertial compensation if self.cfg.inertial_compensation: # check input is provided if mass_matrix is None: raise ValueError("Mass matrix is required for inertial compensation.") # compute task-space dynamics quantities # wrench = (J M^(-1) J^T)^(-1) * \ddot(x_des) mass_matrix_inv = torch.inverse(mass_matrix) if self.cfg.uncouple_motion_wrench: # decoupled-mass matrices lambda_pos = torch.inverse(jacobian[:, 0:3] @ mass_matrix_inv * jacobian[:, 0:3].T) lambda_ori = torch.inverse(jacobian[:, 3:6] @ mass_matrix_inv * jacobian[:, 3:6].T) # desired end-effector wrench (from pseudo-dynamics) decoupled_force = lambda_pos @ des_ee_acc[:, 0:3] decoupled_torque = lambda_ori @ des_ee_acc[:, 3:6] des_motion_wrench = torch.cat(decoupled_force, decoupled_torque) else: # coupled dynamics lambda_full = torch.inverse(jacobian @ mass_matrix_inv * jacobian.T) # desired end-effector wrench (from pseudo-dynamics) des_motion_wrench = lambda_full @ des_ee_acc else: # task-space impedance control # wrench = \ddot(x_des) des_motion_wrench = des_ee_acc # -- joint-space wrench self._desired_torques += jacobian.T @ self._selection_matrix_motion @ des_motion_wrench # compute for force control if desired_ee_force is not None: # -- task-space wrench if self.cfg.stiffness is not None: # check input is provided if ee_force is None: raise ValueError("End-effector force is required for closed-loop force control.") # closed-loop control des_force_wrench = desired_ee_force + self._p_wrench_gains * 
(desired_ee_force - ee_force) else: # open-loop control des_force_wrench = desired_ee_force # -- joint-space wrench self._desired_torques += jacobian.T @ self._selection_matrix_force @ des_force_wrench # add gravity compensation (bias correction) if self.cfg.gravity_compensation: # check input is provided if gravity is None: raise ValueError("Gravity vector is required for gravity compensation.") # add gravity compensation self._desired_torques += gravity return self._desired_torques
16,820
Python
44.462162
153
0.597265
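The configuration and controller classes above can be wired together as in the minimal sketch below. It assumes the classes are importable from ``omni.isaac.orbit.controllers.operational_space`` (the module path implied by the file location) and that the code runs in an Isaac Sim Python environment; the robot count, gains, and target pose are illustrative values only. The call to ``compute`` is left as a comment since its inputs come from the simulated articulation.

import torch

from omni.isaac.orbit.controllers.operational_space import (
    OperationSpaceController,
    OperationSpaceControllerCfg,
)

# absolute pose tracking with fixed impedance gains
cfg = OperationSpaceControllerCfg(
    command_types=["pose_abs"],
    impedance_mode="fixed",
    stiffness=300.0,
    damping_ratio=1.0,  # critically damped
)
controller = OperationSpaceController(cfg, num_robots=4, num_dof=7, device="cpu")
print(controller.num_actions)  # 7 -> (x, y, z, qw, qx, qy, qz) for "pose_abs"

# desired end-effector pose per robot: position + quaternion (w, x, y, z)
ee_pose_des = torch.zeros(4, 7)
ee_pose_des[:, 2] = 0.5  # 0.5 m above the base frame
ee_pose_des[:, 3] = 1.0  # identity orientation
controller.set_command(ee_pose_des)

# inside the simulation loop, the joint torques would then be obtained from
# quantities read off the articulation, e.g.:
# tau = controller.compute(jacobian, ee_pose=ee_pose, ee_vel=ee_vel)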
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/differential_ik.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.utils.math import apply_delta_pose, compute_pose_error if TYPE_CHECKING: from .differential_ik_cfg import DifferentialIKControllerCfg class DifferentialIKController: r"""Differential inverse kinematics (IK) controller. This controller is based on the concept of differential inverse kinematics [1, 2] which is a method for computing the change in joint positions that yields the desired change in pose. .. math:: \Delta \mathbf{q} = \mathbf{J}^{\dagger} \Delta \mathbf{x} \mathbf{q}_{\text{desired}} = \mathbf{q}_{\text{current}} + \Delta \mathbf{q} where :math:`\mathbf{J}^{\dagger}` is the pseudo-inverse of the Jacobian matrix :math:`\mathbf{J}`, :math:`\Delta \mathbf{x}` is the desired change in pose, and :math:`\mathbf{q}_{\text{current}}` is the current joint positions. To deal with singularity in Jacobian, the following methods are supported for computing inverse of the Jacobian: - "pinv": Moore-Penrose pseudo-inverse - "svd": Adaptive singular-value decomposition (SVD) - "trans": Transpose of matrix - "dls": Damped version of Moore-Penrose pseudo-inverse (also called Levenberg-Marquardt) .. caution:: The controller does not assume anything about the frames of the current and desired end-effector pose, or the joint-space velocities. It is up to the user to ensure that these quantities are given in the correct format. Reference: [1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf [2] https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf """ def __init__(self, cfg: DifferentialIKControllerCfg, num_envs: int, device: str): """Initialize the controller. Args: cfg: The configuration for the controller. num_envs: The number of environments. device: The device to use for computations. """ # store inputs self.cfg = cfg self.num_envs = num_envs self._device = device # create buffers self.ee_pos_des = torch.zeros(self.num_envs, 3, device=self._device) self.ee_quat_des = torch.zeros(self.num_envs, 4, device=self._device) # -- input command self._command = torch.zeros(self.num_envs, self.action_dim, device=self._device) """ Properties. """ @property def action_dim(self) -> int: """Dimension of the controller's input command.""" if self.cfg.command_type == "position": return 3 # (x, y, z) elif self.cfg.command_type == "pose" and self.cfg.use_relative_mode: return 6 # (dx, dy, dz, droll, dpitch, dyaw) else: return 7 # (x, y, z, qw, qx, qy, qz) """ Operations. """ def reset(self, env_ids: torch.Tensor = None): """Reset the internals. Args: env_ids: The environment indices to reset. If None, then all environments are reset. """ pass def set_command( self, command: torch.Tensor, ee_pos: torch.Tensor | None = None, ee_quat: torch.Tensor | None = None ): """Set target end-effector pose command. Based on the configured command type and relative mode, the method computes the desired end-effector pose. It is up to the user to ensure that the command is given in the correct frame. The method only applies the relative mode if the command type is ``position_rel`` or ``pose_rel``. Args: command: The input command in shape (N, 3) or (N, 6) or (N, 7). ee_pos: The current end-effector position in shape (N, 3). This is only needed if the command type is ``position_rel`` or ``pose_rel``. 
ee_quat: The current end-effector orientation (w, x, y, z) in shape (N, 4). This is only needed if the command type is ``position_*`` or ``pose_rel``. Raises: ValueError: If the command type is ``position_*`` and :attr:`ee_quat` is None. ValueError: If the command type is ``position_rel`` and :attr:`ee_pos` is None. ValueError: If the command type is ``pose_rel`` and either :attr:`ee_pos` or :attr:`ee_quat` is None. """ # store command self._command[:] = command # compute the desired end-effector pose if self.cfg.command_type == "position": # we need end-effector orientation even though we are in position mode # this is only needed for display purposes if ee_quat is None: raise ValueError("End-effector orientation can not be None for `position_*` command type!") # compute targets if self.cfg.use_relative_mode: if ee_pos is None: raise ValueError("End-effector position can not be None for `position_rel` command type!") self.ee_pos_des[:] = ee_pos + self._command self.ee_quat_des[:] = ee_quat else: self.ee_pos_des[:] = self._command self.ee_quat_des[:] = ee_quat else: # compute targets if self.cfg.use_relative_mode: if ee_pos is None or ee_quat is None: raise ValueError( "Neither end-effector position nor orientation can be None for `pose_rel` command type!" ) self.ee_pos_des, self.ee_quat_des = apply_delta_pose(ee_pos, ee_quat, self._command) else: self.ee_pos_des = self._command[:, 0:3] self.ee_quat_des = self._command[:, 3:7] def compute( self, ee_pos: torch.Tensor, ee_quat: torch.Tensor, jacobian: torch.Tensor, joint_pos: torch.Tensor ) -> torch.Tensor: """Computes the target joint positions that will yield the desired end effector pose. Args: ee_pos: The current end-effector position in shape (N, 3). ee_quat: The current end-effector orientation in shape (N, 4). jacobian: The geometric jacobian matrix in shape (N, 6, num_joints). joint_pos: The current joint positions in shape (N, num_joints). Returns: The target joint positions commands in shape (N, num_joints). """ # compute the delta in joint-space if "position" in self.cfg.command_type: position_error = self.ee_pos_des - ee_pos jacobian_pos = jacobian[:, 0:3] delta_joint_pos = self._compute_delta_joint_pos(delta_pose=position_error, jacobian=jacobian_pos) else: position_error, axis_angle_error = compute_pose_error( ee_pos, ee_quat, self.ee_pos_des, self.ee_quat_des, rot_error_type="axis_angle" ) pose_error = torch.cat((position_error, axis_angle_error), dim=1) delta_joint_pos = self._compute_delta_joint_pos(delta_pose=pose_error, jacobian=jacobian) # return the desired joint positions return joint_pos + delta_joint_pos """ Helper functions. """ def _compute_delta_joint_pos(self, delta_pose: torch.Tensor, jacobian: torch.Tensor) -> torch.Tensor: """Computes the change in joint position that yields the desired change in pose. The method uses the Jacobian mapping from joint-space velocities to end-effector velocities to compute the delta-change in the joint-space that moves the robot closer to a desired end-effector position. Args: delta_pose: The desired delta pose in shape (N, 3) or (N, 6). jacobian: The geometric jacobian matrix in shape (N, 3, num_joints) or (N, 6, num_joints). Returns: The desired delta in joint space. Shape is (N, num-jointsß). 
""" if self.cfg.ik_params is None: raise RuntimeError(f"Inverse-kinematics parameters for method '{self.cfg.ik_method}' is not defined!") # compute the delta in joint-space if self.cfg.ik_method == "pinv": # Jacobian pseudo-inverse # parameters k_val = self.cfg.ik_params["k_val"] # computation jacobian_pinv = torch.linalg.pinv(jacobian) delta_joint_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_joint_pos = delta_joint_pos.squeeze(-1) elif self.cfg.ik_method == "svd": # adaptive SVD # parameters k_val = self.cfg.ik_params["k_val"] min_singular_value = self.cfg.ik_params["min_singular_value"] # computation # U: 6xd, S: dxd, V: d x num-joint U, S, Vh = torch.linalg.svd(jacobian) S_inv = 1.0 / S S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv)) jacobian_pinv = ( torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6] @ torch.diag_embed(S_inv) @ torch.transpose(U, dim0=1, dim1=2) ) delta_joint_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1) delta_joint_pos = delta_joint_pos.squeeze(-1) elif self.cfg.ik_method == "trans": # Jacobian transpose # parameters k_val = self.cfg.ik_params["k_val"] # computation jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) delta_joint_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1) delta_joint_pos = delta_joint_pos.squeeze(-1) elif self.cfg.ik_method == "dls": # damped least squares # parameters lambda_val = self.cfg.ik_params["lambda_val"] # computation jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2) lambda_matrix = (lambda_val**2) * torch.eye(n=jacobian.shape[1], device=self._device) delta_joint_pos = ( jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1) ) delta_joint_pos = delta_joint_pos.squeeze(-1) else: raise ValueError(f"Unsupported inverse-kinematics method: {self.cfg.ik_method}") return delta_joint_pos
10,589
Python
44.06383
153
0.602134
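To make the damped least-squares branch above concrete, the snippet below reproduces a single IK update in plain PyTorch, mirroring the math in ``_compute_delta_joint_pos`` for ``ik_method="dls"``. The shapes and numeric values are illustrative placeholders.

import torch

num_envs, num_joints = 2, 7
jacobian = torch.randn(num_envs, 6, num_joints)  # geometric Jacobian from the articulation
delta_pose = 0.01 * torch.randn(num_envs, 6)     # desired task-space change (position + axis-angle)
lambda_val = 0.01                                # damping coefficient

# delta_q = J^T (J J^T + lambda^2 I)^{-1} delta_x
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(n=jacobian.shape[1])
delta_joint_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
delta_joint_pos = delta_joint_pos.squeeze(-1)    # shape: (num_envs, num_joints)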
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/differential_ik_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING
from typing import Literal

from omni.isaac.orbit.utils import configclass

from .differential_ik import DifferentialIKController


@configclass
class DifferentialIKControllerCfg:
    """Configuration for differential inverse kinematics controller."""

    class_type: type = DifferentialIKController
    """The associated controller class."""

    command_type: Literal["position", "pose"] = MISSING
    """Type of task-space command to control the articulation's body.

    If "position", then the controller only controls the position of the articulation's body.
    Otherwise, the controller controls the pose of the articulation's body.
    """

    use_relative_mode: bool = False
    """Whether to use relative mode for the controller. Defaults to False.

    If True, then the controller treats the input command as a delta change in the position/pose.
    Otherwise, the controller treats the input command as the absolute position/pose.
    """

    ik_method: Literal["pinv", "svd", "trans", "dls"] = MISSING
    """Method for computing inverse of Jacobian."""

    ik_params: dict[str, float] | None = None
    """Parameters for the inverse-kinematics method. Defaults to None, in which case the default
    parameters for the method are used.

    - Moore-Penrose pseudo-inverse ("pinv"):
        - "k_val": Scaling of computed delta-joint positions (default: 1.0).
    - Adaptive Singular Value Decomposition ("svd"):
        - "k_val": Scaling of computed delta-joint positions (default: 1.0).
        - "min_singular_value": Singular values less than this are suppressed to zero (default: 1e-5).
    - Jacobian transpose ("trans"):
        - "k_val": Scaling of computed delta-joint positions (default: 1.0).
    - Damped Moore-Penrose pseudo-inverse ("dls"):
        - "lambda_val": Damping coefficient (default: 0.01).
    """

    def __post_init__(self):
        # check valid input
        if self.command_type not in ["position", "pose"]:
            raise ValueError(f"Unsupported inverse-kinematics command: {self.command_type}.")
        if self.ik_method not in ["pinv", "svd", "trans", "dls"]:
            raise ValueError(f"Unsupported inverse-kinematics method: {self.ik_method}.")
        # default parameters for different inverse kinematics approaches.
        default_ik_params = {
            "pinv": {"k_val": 1.0},
            "svd": {"k_val": 1.0, "min_singular_value": 1e-5},
            "trans": {"k_val": 1.0},
            "dls": {"lambda_val": 0.01},
        }
        # update parameters for IK-method if not provided
        ik_params = default_ik_params[self.ik_method].copy()
        if self.ik_params is not None:
            ik_params.update(self.ik_params)
        self.ik_params = ik_params
2,929
Python
39.136986
100
0.665073
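Putting the two files above together, a differential IK controller might be driven as sketched below. The import paths follow the file locations shown here; the end-effector state, Jacobian, and joint positions would normally be read from the simulated articulation and are replaced by placeholder tensors.

import torch

from omni.isaac.orbit.controllers.differential_ik import DifferentialIKController
from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg

# damped least-squares IK for absolute pose commands
ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
ik_controller = DifferentialIKController(ik_cfg, num_envs=2, device="cpu")

# desired pose command: (x, y, z, qw, qx, qy, qz)
command = torch.tensor([[0.5, 0.0, 0.3, 1.0, 0.0, 0.0, 0.0]]).repeat(2, 1)
ik_controller.set_command(command)

# placeholder robot state (read from the articulation in practice)
ee_pos = torch.zeros(2, 3)
ee_quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).repeat(2, 1)
jacobian = torch.randn(2, 6, 7)
joint_pos = torch.zeros(2, 7)

joint_pos_des = ik_controller.compute(ee_pos, ee_quat, jacobian, joint_pos)  # shape: (2, 7)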
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/joint_impedance.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from dataclasses import MISSING from omni.isaac.orbit.utils import configclass @configclass class JointImpedanceControllerCfg: """Configuration for joint impedance regulation controller.""" command_type: str = "p_abs" """Type of command: p_abs (absolute) or p_rel (relative).""" dof_pos_offset: Sequence[float] | None = None """Offset to DOF position command given to controller. (default: None). If None then position offsets are set to zero. """ impedance_mode: str = MISSING """Type of gains: "fixed", "variable", "variable_kp".""" inertial_compensation: bool = False """Whether to perform inertial compensation (inverse dynamics).""" gravity_compensation: bool = False """Whether to perform gravity compensation.""" stiffness: float | Sequence[float] = MISSING """The positional gain for determining desired torques based on joint position error.""" damping_ratio: float | Sequence[float] | None = None """The damping ratio is used in-conjunction with positional gain to compute desired torques based on joint velocity error. The following math operation is performed for computing velocity gains: :math:`d_gains = 2 * sqrt(p_gains) * damping_ratio`. """ stiffness_limits: tuple[float, float] = (0, 300) """Minimum and maximum values for positional gains. Note: Used only when :obj:`impedance_mode` is "variable" or "variable_kp". """ damping_ratio_limits: tuple[float, float] = (0, 100) """Minimum and maximum values for damping ratios used to compute velocity gains. Note: Used only when :obj:`impedance_mode` is "variable". """ class JointImpedanceController: """Joint impedance regulation control. Reference: [1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf """ def __init__(self, cfg: JointImpedanceControllerCfg, num_robots: int, dof_pos_limits: torch.Tensor, device: str): """Initialize joint impedance controller. Args: cfg: The configuration for the controller. num_robots: The number of robots to control. dof_pos_limits: The joint position limits for each robot. This is a tensor of shape (num_robots, num_dof, 2) where the last dimension contains the lower and upper limits. device: The device to use for computations. Raises: ValueError: When the shape of :obj:`dof_pos_limits` is not (num_robots, num_dof, 2). """ # check valid inputs if len(dof_pos_limits.shape) != 3: raise ValueError(f"Joint position limits has shape '{dof_pos_limits.shape}'. 
Expected length of shape = 3.") # store inputs self.cfg = cfg self.num_robots = num_robots self.num_dof = dof_pos_limits.shape[1] # (num_robots, num_dof, 2) self._device = device # create buffers # -- commands self._dof_pos_target = torch.zeros(self.num_robots, self.num_dof, device=self._device) # -- offsets self._dof_pos_offset = torch.zeros(self.num_robots, self.num_dof, device=self._device) # -- limits self._dof_pos_limits = dof_pos_limits # -- positional gains self._p_gains = torch.zeros(self.num_robots, self.num_dof, device=self._device) self._p_gains[:] = torch.tensor(self.cfg.stiffness, device=self._device) # -- velocity gains self._d_gains = torch.zeros(self.num_robots, self.num_dof, device=self._device) self._d_gains[:] = 2 * torch.sqrt(self._p_gains) * torch.tensor(self.cfg.damping_ratio, device=self._device) # -- position offsets if self.cfg.dof_pos_offset is not None: self._dof_pos_offset[:] = torch.tensor(self.cfg.dof_pos_offset, device=self._device) # -- position gain limits self._p_gains_limits = torch.zeros_like(self._dof_pos_limits) self._p_gains_limits[..., 0] = self.cfg.stiffness_limits[0] self._p_gains_limits[..., 1] = self.cfg.stiffness_limits[1] # -- damping ratio limits self._damping_ratio_limits = torch.zeros_like(self._dof_pos_limits) self._damping_ratio_limits[..., 0] = self.cfg.damping_ratio_limits[0] self._damping_ratio_limits[..., 1] = self.cfg.damping_ratio_limits[1] """ Properties. """ @property def num_actions(self) -> int: """Dimension of the action space of controller.""" # impedance mode if self.cfg.impedance_mode == "fixed": # joint positions return self.num_dof elif self.cfg.impedance_mode == "variable_kp": # joint positions + stiffness return self.num_dof * 2 elif self.cfg.impedance_mode == "variable": # joint positions + stiffness + damping return self.num_dof * 3 else: raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.") """ Operations. """ def initialize(self): """Initialize the internals.""" pass def reset_idx(self, robot_ids: torch.Tensor = None): """Reset the internals.""" pass def set_command(self, command: torch.Tensor): """Set target end-effector pose command. Args: command: The command to set. This is a tensor of shape (num_robots, num_actions) where :obj:`num_actions` is the dimension of the action space of the controller. """ # check input size if command.shape != (self.num_robots, self.num_actions): raise ValueError( f"Invalid command shape '{command.shape}'. Expected: '{(self.num_robots, self.num_actions)}'." 
) # impedance mode if self.cfg.impedance_mode == "fixed": # joint positions self._dof_pos_target[:] = command elif self.cfg.impedance_mode == "variable_kp": # split input command dof_pos_command, stiffness = torch.tensor_split(command, 2, dim=-1) # format command stiffness = stiffness.clip_(min=self._p_gains_limits[0], max=self._p_gains_limits[1]) # joint positions + stiffness self._dof_pos_target[:] = dof_pos_command self._p_gains[:] = stiffness self._d_gains[:] = 2 * torch.sqrt(self._p_gains) # critically damped elif self.cfg.impedance_mode == "variable": # split input command dof_pos_command, stiffness, damping_ratio = torch.tensor_split(command, 3, dim=-1) # format command stiffness = stiffness.clip_(min=self._p_gains_limits[0], max=self._p_gains_limits[1]) damping_ratio = damping_ratio.clip_(min=self._damping_ratio_limits[0], max=self._damping_ratio_limits[1]) # joint positions + stiffness + damping self._dof_pos_target[:] = dof_pos_command self._p_gains[:] = stiffness self._d_gains[:] = 2 * torch.sqrt(self._p_gains) * damping_ratio else: raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.") def compute( self, dof_pos: torch.Tensor, dof_vel: torch.Tensor, mass_matrix: torch.Tensor | None = None, gravity: torch.Tensor | None = None, ) -> torch.Tensor: """Performs inference with the controller. Args: dof_pos: The current joint positions. dof_vel: The current joint velocities. mass_matrix: The joint-space inertial matrix. Defaults to None. gravity: The joint-space gravity vector. Defaults to None. Raises: ValueError: When the command type is invalid. Returns: The target joint torques commands. """ # resolve the command type if self.cfg.command_type == "p_abs": desired_dof_pos = self._dof_pos_target + self._dof_pos_offset elif self.cfg.command_type == "p_rel": desired_dof_pos = self._dof_pos_target + dof_pos else: raise ValueError(f"Invalid dof position command mode: {self.cfg.command_type}.") # compute errors desired_dof_pos = desired_dof_pos.clip_(min=self._dof_pos_limits[..., 0], max=self._dof_pos_limits[..., 1]) dof_pos_error = desired_dof_pos - dof_pos dof_vel_error = -dof_vel # compute acceleration des_dof_acc = self._p_gains * dof_pos_error + self._d_gains * dof_vel_error # compute torques # -- inertial compensation if self.cfg.inertial_compensation: # inverse dynamics control desired_torques = mass_matrix @ des_dof_acc else: # decoupled spring-mass control desired_torques = des_dof_acc # -- gravity compensation (bias correction) if self.cfg.gravity_compensation: desired_torques += gravity return desired_torques
9,320
Python
39.176724
153
0.615558
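A minimal sketch of driving the joint impedance controller above. The module path is inferred from the file location, and the joint limits, gains, and joint states are illustrative placeholders.

import torch

from omni.isaac.orbit.controllers.joint_impedance import (
    JointImpedanceController,
    JointImpedanceControllerCfg,
)

num_robots, num_dof = 2, 7
# symmetric joint limits of +/- 3 rad for every joint (placeholder values)
dof_pos_limits = torch.zeros(num_robots, num_dof, 2)
dof_pos_limits[..., 0] = -3.0
dof_pos_limits[..., 1] = 3.0

cfg = JointImpedanceControllerCfg(
    command_type="p_abs",
    impedance_mode="fixed",
    stiffness=100.0,
    damping_ratio=1.0,
)
controller = JointImpedanceController(cfg, num_robots=num_robots, dof_pos_limits=dof_pos_limits, device="cpu")

# absolute joint-position target and current joint state
controller.set_command(torch.zeros(num_robots, controller.num_actions))
dof_pos = 0.1 * torch.randn(num_robots, num_dof)
dof_vel = torch.zeros(num_robots, num_dof)
torques = controller.compute(dof_pos, dof_vel)  # shape: (num_robots, num_dof)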
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/config/rmp_flow.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import os from omni.isaac.core.utils.extensions import get_extension_path_from_name from omni.isaac.orbit.controllers.rmp_flow import RmpFlowControllerCfg # Note: RMP-Flow config files for supported robots are stored in the motion_generation extension _RMP_CONFIG_DIR = os.path.join(get_extension_path_from_name("omni.isaac.motion_generation"), "motion_policy_configs") # Path to current directory _CUR_DIR = os.path.dirname(os.path.realpath(__file__)) FRANKA_RMPFLOW_CFG = RmpFlowControllerCfg( config_file=os.path.join(_RMP_CONFIG_DIR, "franka", "rmpflow", "franka_rmpflow_common.yaml"), urdf_file=os.path.join(_CUR_DIR, "data", "lula_franka_gen.urdf"), collision_file=os.path.join(_RMP_CONFIG_DIR, "franka", "rmpflow", "robot_descriptor.yaml"), frame_name="panda_end_effector", evaluations_per_frame=5, ) """Configuration of RMPFlow for Franka arm (default from `omni.isaac.motion_generation`).""" UR10_RMPFLOW_CFG = RmpFlowControllerCfg( config_file=os.path.join(_RMP_CONFIG_DIR, "ur10", "rmpflow", "ur10_rmpflow_config.yaml"), urdf_file=os.path.join(_RMP_CONFIG_DIR, "ur10", "ur10_robot.urdf"), collision_file=os.path.join(_RMP_CONFIG_DIR, "ur10", "rmpflow", "ur10_robot_description.yaml"), frame_name="ee_link", evaluations_per_frame=5, ) """Configuration of RMPFlow for UR10 arm (default from `omni.isaac.motion_generation`)."""
1,542
Python
39.605262
117
0.730869
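An analogous RMP-Flow configuration for a user-provided robot could be defined with the same fields used in the Franka and UR10 entries above. The file paths and frame name below are hypothetical placeholders, and the sketch assumes these are the only required fields of ``RmpFlowControllerCfg``.

import os

from omni.isaac.orbit.controllers.rmp_flow import RmpFlowControllerCfg

# hypothetical location of a user-provided robot description
_MY_ROBOT_CFG_DIR = "/path/to/my_robot/rmpflow"

MY_ROBOT_RMPFLOW_CFG = RmpFlowControllerCfg(
    config_file=os.path.join(_MY_ROBOT_CFG_DIR, "rmpflow_config.yaml"),
    urdf_file=os.path.join(_MY_ROBOT_CFG_DIR, "my_robot.urdf"),
    collision_file=os.path.join(_MY_ROBOT_CFG_DIR, "robot_descriptor.yaml"),
    frame_name="end_effector_link",
    evaluations_per_frame=5,
)
"""Configuration of RMPFlow for a custom arm (illustrative only)."""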
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations
158
Python
21.714283
56
0.746835
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package containing simulation-specific functionalities.

These include:

* Ability to spawn different objects and materials into Omniverse
* Ability to define and modify various schemas on USD prims
* Converters to obtain USD files from other file formats (such as URDF, OBJ, STL, FBX)
* A utility class to control the simulator

.. note::

    Currently, only a subset of all possible schemas and prims in Omniverse is supported.
    We are expanding this set of functions on an as-needed basis. In case there are specific
    prims or schemas that you would like to include, please open an issue on GitHub as a
    feature request, elaborating on the required application.

To make it convenient to use the module, we recommend importing the module as follows:

.. code-block:: python

    import omni.isaac.orbit.sim as sim_utils

"""

from .converters import *  # noqa: F401, F403
from .schemas import *  # noqa: F401, F403
from .simulation_cfg import PhysxCfg, SimulationCfg  # noqa: F401, F403
from .simulation_context import SimulationContext, build_simulation_context  # noqa: F401, F403
from .spawners import *  # noqa: F401, F403
from .utils import *  # noqa: F401, F403
1,296
Python
36.057142
95
0.75463
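Following the import convention recommended above, a typical entry point can build a simulation configuration as sketched below. The ``SimulationCfg`` and ``PhysxCfg`` fields used here are the ones defined later in this package; the exact ``SimulationContext`` constructor arguments are not shown in this file, so that call is left as a comment, and the numeric values are illustrative.

import omni.isaac.orbit.sim as sim_utils

# run physics at 200 Hz on the CPU with continuous collision detection enabled
sim_cfg = sim_utils.SimulationCfg(
    dt=1.0 / 200.0,
    use_gpu_pipeline=False,
    device="cpu",
    physx=sim_utils.PhysxCfg(
        use_gpu=False,
        solver_type=0,  # PGS
        enable_ccd=True,
    ),
)
# the configuration is then consumed by the simulation context, e.g.:
# sim = sim_utils.SimulationContext(sim_cfg)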
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module with USD-related utilities.""" from __future__ import annotations import functools import inspect import re from collections.abc import Callable from typing import TYPE_CHECKING, Any import carb import omni.isaac.core.utils.stage as stage_utils import omni.kit.commands from omni.isaac.cloner import Cloner from pxr import PhysxSchema, Sdf, Semantics, Usd, UsdGeom, UsdPhysics, UsdShade from omni.isaac.orbit.utils.string import to_camel_case from . import schemas if TYPE_CHECKING: from .spawners.spawner_cfg import SpawnerCfg """ Attribute - Setters. """ def safe_set_attribute_on_usd_schema(schema_api: Usd.APISchemaBase, name: str, value: Any, camel_case: bool): """Set the value of an attribute on its USD schema if it exists. A USD API schema serves as an interface or API for authoring and extracting a set of attributes. They typically derive from the :class:`pxr.Usd.SchemaBase` class. This function checks if the attribute exists on the schema and sets the value of the attribute if it exists. Args: schema_api: The USD schema to set the attribute on. name: The name of the attribute. value: The value to set the attribute to. camel_case: Whether to convert the attribute name to camel case. Raises: TypeError: When the input attribute name does not exist on the provided schema API. """ # if value is None, do nothing if value is None: return # convert attribute name to camel case if camel_case: attr_name = to_camel_case(name, to="CC") else: attr_name = name # retrieve the attribute # reference: https://openusd.org/dev/api/_usd__page__common_idioms.html#Usd_Create_Or_Get_Property attr = getattr(schema_api, f"Create{attr_name}Attr", None) # check if attribute exists if attr is not None: attr().Set(value) else: # think: do we ever need to create the attribute if it doesn't exist? # currently, we are not doing this since the schemas are already created with some defaults. carb.log_error(f"Attribute '{attr_name}' does not exist on prim '{schema_api.GetPath()}'.") raise TypeError(f"Attribute '{attr_name}' does not exist on prim '{schema_api.GetPath()}'.") def safe_set_attribute_on_usd_prim(prim: Usd.Prim, attr_name: str, value: Any, camel_case: bool): """Set the value of a attribute on its USD prim. The function creates a new attribute if it does not exist on the prim. This is because in some cases (such as with shaders), their attributes are not exposed as USD prim properties that can be altered. This function allows us to set the value of the attributes in these cases. Args: prim: The USD prim to set the attribute on. attr_name: The name of the attribute. value: The value to set the attribute to. camel_case: Whether to convert the attribute name to camel case. 
""" # if value is None, do nothing if value is None: return # convert attribute name to camel case if camel_case: attr_name = to_camel_case(attr_name, to="cC") # resolve sdf type based on value if isinstance(value, bool): sdf_type = Sdf.ValueTypeNames.Bool elif isinstance(value, int): sdf_type = Sdf.ValueTypeNames.Int elif isinstance(value, float): sdf_type = Sdf.ValueTypeNames.Float elif isinstance(value, (tuple, list)) and len(value) == 3 and any(isinstance(v, float) for v in value): sdf_type = Sdf.ValueTypeNames.Float3 elif isinstance(value, (tuple, list)) and len(value) == 2 and any(isinstance(v, float) for v in value): sdf_type = Sdf.ValueTypeNames.Float2 else: raise NotImplementedError( f"Cannot set attribute '{attr_name}' with value '{value}'. Please modify the code to support this type." ) # change property omni.kit.commands.execute( "ChangePropertyCommand", prop_path=Sdf.Path(f"{prim.GetPath()}.{attr_name}"), value=value, prev=None, type_to_create_if_not_exist=sdf_type, usd_context_name=prim.GetStage(), ) """ Decorators. """ def apply_nested(func: Callable) -> Callable: """Decorator to apply a function to all prims under a specified prim-path. The function iterates over the provided prim path and all its children to apply input function to all prims under the specified prim path. If the function succeeds to apply to a prim, it will not look at the children of that prim. This is based on the physics behavior that nested schemas are not allowed. For example, a parent prim and its child prim cannot both have a rigid-body schema applied on them, or it is not possible to have nested articulations. While traversing the prims under the specified prim path, the function will throw a warning if it does not succeed to apply the function to any prim. This is because the user may have intended to apply the function to a prim that does not have valid attributes, or the prim may be an instanced prim. Args: func: The function to apply to all prims under a specified prim-path. The function must take the prim-path and other arguments. It should return a boolean indicating whether the function succeeded or not. Returns: The wrapped function that applies the function to all prims under a specified prim-path. Raises: ValueError: If the prim-path does not exist on the stage. 
""" @functools.wraps(func) def wrapper(prim_path: str | Sdf.Path, *args, **kwargs): # map args and kwargs to function signature so we can get the stage # note: we do this to check if stage is given in arg or kwarg sig = inspect.signature(func) bound_args = sig.bind(prim_path, *args, **kwargs) # get current stage stage = bound_args.arguments.get("stage") if stage is None: stage = stage_utils.get_current_stage() # get USD prim prim: Usd.Prim = stage.GetPrimAtPath(prim_path) # check if prim is valid if not prim.IsValid(): raise ValueError(f"Prim at path '{prim_path}' is not valid.") # add iterable to check if property was applied on any of the prims count_success = 0 instanced_prim_paths = [] # iterate over all prims under prim-path all_prims = [prim] while len(all_prims) > 0: # get current prim child_prim = all_prims.pop(0) child_prim_path = child_prim.GetPath().pathString # type: ignore # check if prim is a prototype if child_prim.IsInstance(): instanced_prim_paths.append(child_prim_path) continue # set properties success = func(child_prim_path, *args, **kwargs) # if successful, do not look at children # this is based on the physics behavior that nested schemas are not allowed if not success: all_prims += child_prim.GetChildren() else: count_success += 1 # check if we were successful in applying the function to any prim if count_success == 0: carb.log_warn( f"Could not perform '{func.__name__}' on any prims under: '{prim_path}'." " This might be because of the following reasons:" "\n\t(1) The desired attribute does not exist on any of the prims." "\n\t(2) The desired attribute exists on an instanced prim." f"\n\t\tDiscovered list of instanced prim paths: {instanced_prim_paths}" ) return wrapper def clone(func: Callable) -> Callable: """Decorator for cloning a prim based on matching prim paths of the prim's parent. The decorator checks if the parent prim path matches any prim paths in the stage. If so, it clones the spawned prim at each matching prim path. For example, if the input prim path is: ``/World/Table_[0-9]/Bottle``, the decorator will clone the prim at each matching prim path of the parent prim: ``/World/Table_0/Bottle``, ``/World/Table_1/Bottle``, etc. Note: For matching prim paths, the decorator assumes that valid prims exist for all matching prim paths. In case no matching prim paths are found, the decorator raises a ``RuntimeError``. Args: func: The function to decorate. Returns: The decorated function that spawns the prim and clones it at each matching prim path. It returns the spawned source prim, i.e., the first prim in the list of matching prim paths. """ @functools.wraps(func) def wrapper(prim_path: str | Sdf.Path, cfg: SpawnerCfg, *args, **kwargs): # cast prim_path to str type in case its an Sdf.Path prim_path = str(prim_path) # check prim path is global if not prim_path.startswith("/"): raise ValueError(f"Prim path '{prim_path}' is not global. 
It must start with '/'.") # resolve: {SPAWN_NS}/AssetName # note: this assumes that the spawn namespace already exists in the stage root_path, asset_path = prim_path.rsplit("/", 1) # check if input is a regex expression # note: a valid prim path can only contain alphanumeric characters, underscores, and forward slashes is_regex_expression = re.match(r"^[a-zA-Z0-9/_]+$", root_path) is None # resolve matching prims for source prim path expression if is_regex_expression and root_path != "": source_prim_paths = find_matching_prim_paths(root_path) # if no matching prims are found, raise an error if len(source_prim_paths) == 0: raise RuntimeError( f"Unable to find source prim path: '{root_path}'. Please create the prim before spawning." ) else: source_prim_paths = [root_path] # resolve prim paths for spawning and cloning prim_paths = [f"{source_prim_path}/{asset_path}" for source_prim_path in source_prim_paths] # spawn single instance prim = func(prim_paths[0], cfg, *args, **kwargs) # set the prim visibility if hasattr(cfg, "visible"): imageable = UsdGeom.Imageable(prim) if cfg.visible: imageable.MakeVisible() else: imageable.MakeInvisible() # set the semantic annotations if hasattr(cfg, "semantic_tags") and cfg.semantic_tags is not None: # note: taken from replicator scripts.utils.utils.py for semantic_type, semantic_value in cfg.semantic_tags: # deal with spaces by replacing them with underscores semantic_type_sanitized = semantic_type.replace(" ", "_") semantic_value_sanitized = semantic_value.replace(" ", "_") # set the semantic API for the instance instance_name = f"{semantic_type_sanitized}_{semantic_value_sanitized}" sem = Semantics.SemanticsAPI.Apply(prim, instance_name) # create semantic type and data attributes sem.CreateSemanticTypeAttr() sem.CreateSemanticDataAttr() sem.GetSemanticTypeAttr().Set(semantic_type) sem.GetSemanticDataAttr().Set(semantic_value) # activate rigid body contact sensors if hasattr(cfg, "activate_contact_sensors") and cfg.activate_contact_sensors: schemas.activate_contact_sensors(prim_paths[0], cfg.activate_contact_sensors) # clone asset using cloner API if len(prim_paths) > 1: cloner = Cloner() # clone the prim cloner.clone(prim_paths[0], prim_paths[1:], replicate_physics=False, copy_from_source=cfg.copy_from_source) # return the source prim return prim return wrapper """ Material bindings. """ @apply_nested def bind_visual_material( prim_path: str | Sdf.Path, material_path: str | Sdf.Path, stage: Usd.Stage | None = None, stronger_than_descendants: bool = True, ): """Bind a visual material to a prim. This function is a wrapper around the USD command `BindMaterialCommand`_. .. note:: The function is decorated with :meth:`apply_nested` to allow applying the function to a prim path and all its descendants. .. _BindMaterialCommand: https://docs.omniverse.nvidia.com/kit/docs/omni.usd/latest/omni.usd.commands/omni.usd.commands.BindMaterialCommand.html Args: prim_path: The prim path where to apply the material. material_path: The prim path of the material to apply. stage: The stage where the prim and material exist. Defaults to None, in which case the current stage is used. stronger_than_descendants: Whether the material should override the material of its descendants. Defaults to True. Raises: ValueError: If the provided prim paths do not exist on stage. 
""" # resolve stage if stage is None: stage = stage_utils.get_current_stage() # check if prim and material exists if not stage.GetPrimAtPath(prim_path).IsValid(): raise ValueError(f"Target prim '{material_path}' does not exist.") if not stage.GetPrimAtPath(material_path).IsValid(): raise ValueError(f"Visual material '{material_path}' does not exist.") # resolve token for weaker than descendants if stronger_than_descendants: binding_strength = "strongerThanDescendants" else: binding_strength = "weakerThanDescendants" # obtain material binding API # note: we prefer using the command here as it is more robust than the USD API success, _ = omni.kit.commands.execute( "BindMaterialCommand", prim_path=prim_path, material_path=material_path, strength=binding_strength, stage=stage, ) # return success return success @apply_nested def bind_physics_material( prim_path: str | Sdf.Path, material_path: str | Sdf.Path, stage: Usd.Stage | None = None, stronger_than_descendants: bool = True, ): """Bind a physics material to a prim. `Physics material`_ can be applied only to a prim with physics-enabled on them. This includes having collision APIs, or deformable body APIs, or being a particle system. In case the prim does not have any of these APIs, the function will not apply the material and return False. .. note:: The function is decorated with :meth:`apply_nested` to allow applying the function to a prim path and all its descendants. .. _Physics material: https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html#physics-materials Args: prim_path: The prim path where to apply the material. material_path: The prim path of the material to apply. stage: The stage where the prim and material exist. Defaults to None, in which case the current stage is used. stronger_than_descendants: Whether the material should override the material of its descendants. Defaults to True. Raises: ValueError: If the provided prim paths do not exist on stage. """ # resolve stage if stage is None: stage = stage_utils.get_current_stage() # check if prim and material exists if not stage.GetPrimAtPath(prim_path).IsValid(): raise ValueError(f"Target prim '{material_path}' does not exist.") if not stage.GetPrimAtPath(material_path).IsValid(): raise ValueError(f"Physics material '{material_path}' does not exist.") # get USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim has collision applied on it has_physics_scene_api = prim.HasAPI(PhysxSchema.PhysxSceneAPI) has_collider = prim.HasAPI(UsdPhysics.CollisionAPI) has_deformable_body = prim.HasAPI(PhysxSchema.PhysxDeformableBodyAPI) has_particle_system = prim.IsA(PhysxSchema.PhysxParticleSystem) if not (has_physics_scene_api or has_collider or has_deformable_body or has_particle_system): carb.log_verbose( f"Cannot apply physics material '{material_path}' on prim '{prim_path}'. It is neither a" " PhysX scene, collider, a deformable body, nor a particle system." 
) return False # obtain material binding API if prim.HasAPI(UsdShade.MaterialBindingAPI): material_binding_api = UsdShade.MaterialBindingAPI(prim) else: material_binding_api = UsdShade.MaterialBindingAPI.Apply(prim) # obtain the material prim material = UsdShade.Material(stage.GetPrimAtPath(material_path)) # resolve token for weaker than descendants if stronger_than_descendants: binding_strength = UsdShade.Tokens.strongerThanDescendants else: binding_strength = UsdShade.Tokens.weakerThanDescendants # apply the material material_binding_api.Bind(material, bindingStrength=binding_strength, materialPurpose="physics") # type: ignore # return success return True """ Exporting. """ def export_prim_to_file( path: str | Sdf.Path, source_prim_path: str | Sdf.Path, target_prim_path: str | Sdf.Path | None = None, stage: Usd.Stage | None = None, ): """Exports a prim from a given stage to a USD file. The function creates a new layer at the provided path and copies the prim to the layer. It sets the copied prim as the default prim in the target layer. Additionally, it updates the stage up-axis and meters-per-unit to match the current stage. Args: path: The filepath path to export the prim to. source_prim_path: The prim path to export. target_prim_path: The prim path to set as the default prim in the target layer. Defaults to None, in which case the source prim path is used. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Raises: ValueError: If the prim paths are not global (i.e: do not start with '/'). """ # automatically casting to str in case args # are path types path = str(path) source_prim_path = str(source_prim_path) if target_prim_path is not None: target_prim_path = str(target_prim_path) if not source_prim_path.startswith("/"): raise ValueError(f"Source prim path '{source_prim_path}' is not global. It must start with '/'.") if target_prim_path is not None and not target_prim_path.startswith("/"): raise ValueError(f"Target prim path '{target_prim_path}' is not global. It must start with '/'.") # get current stage if stage is None: stage: Usd.Stage = omni.usd.get_context().get_stage() # get root layer source_layer = stage.GetRootLayer() # only create a new layer if it doesn't exist already target_layer = Sdf.Find(path) if target_layer is None: target_layer = Sdf.Layer.CreateNew(path) # open the target stage target_stage = Usd.Stage.Open(target_layer) # update stage data UsdGeom.SetStageUpAxis(target_stage, UsdGeom.GetStageUpAxis(stage)) UsdGeom.SetStageMetersPerUnit(target_stage, UsdGeom.GetStageMetersPerUnit(stage)) # specify the prim to copy source_prim_path = Sdf.Path(source_prim_path) if target_prim_path is None: target_prim_path = source_prim_path # copy the prim Sdf.CreatePrimInLayer(target_layer, target_prim_path) Sdf.CopySpec(source_layer, source_prim_path, target_layer, target_prim_path) # set the default prim target_layer.defaultPrim = Sdf.Path(target_prim_path).name # resolve all paths relative to layer path omni.usd.resolve_paths(source_layer.identifier, target_layer.identifier) # save the stage target_layer.Save() """ USD Prim properties. """ def make_uninstanceable(prim_path: str | Sdf.Path, stage: Usd.Stage | None = None): """Check if a prim and its descendants are instanced and make them uninstanceable. This function checks if the prim at the specified prim path and its descendants are instanced. If so, it makes the respective prim uninstanceable by disabling instancing on the prim. 
This is useful when we want to modify the properties of a prim that is instanced. For example, if we want to apply a different material on an instanced prim, we need to make the prim uninstanceable first. Args: prim_path: The prim path to check. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # make paths str type if they aren't already prim_path = str(prim_path) # check if prim path is global if not prim_path.startswith("/"): raise ValueError(f"Prim path '{prim_path}' is not global. It must start with '/'.") # get current stage if stage is None: stage = stage_utils.get_current_stage() # get prim prim: Usd.Prim = stage.GetPrimAtPath(prim_path) # check if prim is valid if not prim.IsValid(): raise ValueError(f"Prim at path '{prim_path}' is not valid.") # iterate over all prims under prim-path all_prims = [prim] while len(all_prims) > 0: # get current prim child_prim = all_prims.pop(0) # check if prim is instanced if child_prim.IsInstance(): # make the prim uninstanceable child_prim.SetInstanceable(False) # add children to list all_prims += child_prim.GetChildren() """ USD Stage traversal. """ def get_first_matching_child_prim( prim_path: str | Sdf.Path, predicate: Callable[[Usd.Prim], bool], stage: Usd.Stage | None = None ) -> Usd.Prim | None: """Recursively get the first USD Prim at the path string that passes the predicate function Args: prim_path: The path of the prim in the stage. predicate: The function to test the prims against. It takes a prim as input and returns a boolean. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Returns: The first prim on the path that passes the predicate. If no prim passes the predicate, it returns None. Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # make paths str type if they aren't already prim_path = str(prim_path) # check if prim path is global if not prim_path.startswith("/"): raise ValueError(f"Prim path '{prim_path}' is not global. It must start with '/'.") # get current stage if stage is None: stage = stage_utils.get_current_stage() # get prim prim = stage.GetPrimAtPath(prim_path) # check if prim is valid if not prim.IsValid(): raise ValueError(f"Prim at path '{prim_path}' is not valid.") # iterate over all prims under prim-path all_prims = [prim] while len(all_prims) > 0: # get current prim child_prim = all_prims.pop(0) # check if prim passes predicate if predicate(child_prim): return child_prim # add children to list all_prims += child_prim.GetChildren() return None def get_all_matching_child_prims( prim_path: str | Sdf.Path, predicate: Callable[[Usd.Prim], bool] = lambda _: True, depth: int | None = None, stage: Usd.Stage | None = None, ) -> list[Usd.Prim]: """Performs a search starting from the root and returns all the prims matching the predicate. Args: prim_path: The root prim path to start the search from. predicate: The predicate that checks if the prim matches the desired criteria. It takes a prim as input and returns a boolean. Defaults to a function that always returns True. depth: The maximum depth for traversal, should be bigger than zero if specified. Defaults to None (i.e: traversal happens till the end of the tree). stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Returns: A list containing all the prims matching the predicate. 
Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # make paths str type if they aren't already prim_path = str(prim_path) # check if prim path is global if not prim_path.startswith("/"): raise ValueError(f"Prim path '{prim_path}' is not global. It must start with '/'.") # get current stage if stage is None: stage = stage_utils.get_current_stage() # get prim prim = stage.GetPrimAtPath(prim_path) # check if prim is valid if not prim.IsValid(): raise ValueError(f"Prim at path '{prim_path}' is not valid.") # check if depth is valid if depth is not None and depth <= 0: raise ValueError(f"Depth must be bigger than zero, got {depth}.") # iterate over all prims under prim-path # list of tuples (prim, current_depth) all_prims_queue = [(prim, 0)] output_prims = [] while len(all_prims_queue) > 0: # get current prim child_prim, current_depth = all_prims_queue.pop(0) # check if prim passes predicate if predicate(child_prim): output_prims.append(child_prim) # add children to list if depth is None or current_depth < depth: all_prims_queue += [(child, current_depth + 1) for child in child_prim.GetChildren()] return output_prims def find_first_matching_prim(prim_path_regex: str, stage: Usd.Stage | None = None) -> Usd.Prim | None: """Find the first matching prim in the stage based on input regex expression. Args: prim_path_regex: The regex expression for prim path. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Returns: The first prim that matches input expression. If no prim matches, returns None. Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # check prim path is global if not prim_path_regex.startswith("/"): raise ValueError(f"Prim path '{prim_path_regex}' is not global. It must start with '/'.") # get current stage if stage is None: stage = stage_utils.get_current_stage() # need to wrap the token patterns in '^' and '$' to prevent matching anywhere in the string pattern = f"^{prim_path_regex}$" compiled_pattern = re.compile(pattern) # obtain matching prim (depth-first search) for prim in stage.Traverse(): # check if prim passes predicate if compiled_pattern.match(prim.GetPath().pathString) is not None: return prim return None def find_matching_prims(prim_path_regex: str, stage: Usd.Stage | None = None) -> list[Usd.Prim]: """Find all the matching prims in the stage based on input regex expression. Args: prim_path_regex: The regex expression for prim path. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Returns: A list of prims that match input expression. Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # check prim path is global if not prim_path_regex.startswith("/"): raise ValueError(f"Prim path '{prim_path_regex}' is not global. 
It must start with '/'.") # get current stage if stage is None: stage = stage_utils.get_current_stage() # need to wrap the token patterns in '^' and '$' to prevent matching anywhere in the string tokens = prim_path_regex.split("/")[1:] tokens = [f"^{token}$" for token in tokens] # iterate over all prims in stage (breath-first search) all_prims = [stage.GetPseudoRoot()] output_prims = [] for index, token in enumerate(tokens): token_compiled = re.compile(token) for prim in all_prims: for child in prim.GetAllChildren(): if token_compiled.match(child.GetName()) is not None: output_prims.append(child) if index < len(tokens) - 1: all_prims = output_prims output_prims = [] return output_prims def find_matching_prim_paths(prim_path_regex: str, stage: Usd.Stage | None = None) -> list[str]: """Find all the matching prim paths in the stage based on input regex expression. Args: prim_path_regex: The regex expression for prim path. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. Returns: A list of prim paths that match input expression. Raises: ValueError: If the prim path is not global (i.e: does not start with '/'). """ # obtain matching prims output_prims = find_matching_prims(prim_path_regex, stage) # convert prims to prim paths output_prim_paths = [] for prim in output_prims: output_prim_paths.append(prim.GetPath().pathString) return output_prim_paths
29,548
Python
39.983356
148
0.659266
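The stage-traversal and material-binding helpers above compose naturally. The sketch below assumes a USD stage is already open in Isaac Sim and that the environment and material prim paths exist; those paths are hypothetical placeholders.

import omni.isaac.orbit.sim as sim_utils
from pxr import UsdGeom

# resolve all robot prim paths with a regex expression (paths are placeholders)
robot_prim_paths = sim_utils.find_matching_prim_paths("/World/envs/env_.*/Robot")

# find the first mesh prim under the first robot
mesh_prim = sim_utils.get_first_matching_child_prim(
    robot_prim_paths[0], predicate=lambda prim: prim.IsA(UsdGeom.Mesh)
)

# bind a visual material to every robot; the @apply_nested decorator propagates the call to descendants
for prim_path in robot_prim_paths:
    sim_utils.bind_visual_material(prim_path, "/World/Looks/RobotMaterial")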
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/simulation_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base configuration of the environment.

This module defines the general configuration of the environment. It includes parameters for
configuring the environment instances, viewer settings, and simulation parameters.
"""

from __future__ import annotations

from typing import Literal

from omni.isaac.orbit.utils import configclass

from .spawners.materials import RigidBodyMaterialCfg


@configclass
class PhysxCfg:
    """Configuration for PhysX solver-related parameters.

    These parameters are used to configure the PhysX solver. For more information, see the PhysX 5 SDK
    documentation.

    PhysX 5 supports GPU-accelerated physics simulation. This is enabled by default, but can be disabled
    through the flag `use_gpu`. Unlike CPU PhysX, the GPU simulation feature is not able to dynamically
    grow all the buffers. Therefore, it is necessary to provide a reasonable estimate of the buffer sizes
    for GPU features. If insufficient buffer sizes are provided, the simulation will fail with errors and
    lead to adverse behaviors. The buffer sizes can be adjusted through the `gpu_*` parameters.

    References:
        * PhysX 5 documentation: https://nvidia-omniverse.github.io/PhysX/
    """

    use_gpu: bool = True
    """Enable/disable GPU accelerated dynamics simulation. Default is True.

    This enables GPU-accelerated implementations for broad-phase collision checks, contact generation,
    shape and body management, and constrained solver.
    """

    solver_type: Literal[0, 1] = 1
    """The type of solver to use. Default is 1 (TGS).

    Available solvers:

    * :obj:`0`: PGS (Projective Gauss-Seidel)
    * :obj:`1`: TGS (Truncated Gauss-Seidel)
    """

    min_position_iteration_count: int = 1
    """Minimum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 1.

    .. note::

        Each physics actor in Omniverse specifies its own solver iteration count. The solver takes
        the number of iterations specified by the actor with the highest iteration and clamps it to
        the range ``[min_position_iteration_count, max_position_iteration_count]``.
    """

    max_position_iteration_count: int = 255
    """Maximum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 255.

    .. note::

        Each physics actor in Omniverse specifies its own solver iteration count. The solver takes
        the number of iterations specified by the actor with the highest iteration and clamps it to
        the range ``[min_position_iteration_count, max_position_iteration_count]``.
    """

    min_velocity_iteration_count: int = 0
    """Minimum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 0.

    .. note::

        Each physics actor in Omniverse specifies its own solver iteration count. The solver takes
        the number of iterations specified by the actor with the highest iteration and clamps it to
        the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``.
    """

    max_velocity_iteration_count: int = 255
    """Maximum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 255.

    .. note::

        Each physics actor in Omniverse specifies its own solver iteration count. The solver takes
        the number of iterations specified by the actor with the highest iteration and clamps it to
        the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``.
    """

    enable_ccd: bool = False
    """Enable a second broad-phase pass that makes it possible to prevent objects from tunneling through each other.
Default is False.""" enable_stabilization: bool = True """Enable/disable additional stabilization pass in solver. Default is True.""" enable_enhanced_determinism: bool = False """Enable/disable improved determinism at the expense of performance. Defaults to False. For more information on PhysX determinism, please check `here`_. .. _here: https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/docs/RigidBodyDynamics.html#enhanced-determinism """ bounce_threshold_velocity: float = 0.5 """Relative velocity threshold for contacts to bounce (in m/s). Default is 0.5 m/s.""" friction_offset_threshold: float = 0.04 """Threshold for contact point to experience friction force (in m). Default is 0.04 m.""" friction_correlation_distance: float = 0.025 """Distance threshold for merging contacts into a single friction anchor point (in m). Default is 0.025 m.""" gpu_max_rigid_contact_count: int = 2**23 """Size of rigid contact stream buffer allocated in pinned host memory. Default is 2 ** 23.""" gpu_max_rigid_patch_count: int = 5 * 2**15 """Size of the rigid contact patch stream buffer allocated in pinned host memory. Default is 5 * 2 ** 15.""" gpu_found_lost_pairs_capacity: int = 2**21 """Capacity of found and lost buffers allocated in GPU global memory. Default is 2 ** 21. This is used for the found/lost pair reports in the BP. """ gpu_found_lost_aggregate_pairs_capacity: int = 2**25 """Capacity of found and lost buffers in aggregate system allocated in GPU global memory. Default is 2 ** 25. This is used for the found/lost pair reports in AABB manager. """ gpu_total_aggregate_pairs_capacity: int = 2**21 """Capacity of total number of aggregate pairs allocated in GPU global memory. Default is 2 ** 21.""" gpu_collision_stack_size: int = 2**26 """Size of the collision stack buffer allocated in pinned host memory. Default is 2 ** 26.""" gpu_heap_capacity: int = 2**26 """Initial capacity of the GPU and pinned host memory heaps. Additional memory will be allocated if more memory is required. Default is 2 ** 26.""" gpu_temp_buffer_capacity: int = 2**24 """Capacity of temp buffer allocated in pinned host memory. Default is 2 ** 24.""" gpu_max_num_partitions: int = 8 """Limitation for the partitions in the GPU dynamics pipeline. Default is 8. This variable must be power of 2. A value greater than 32 is currently not supported. Range: (1, 32) """ gpu_max_soft_body_contacts: int = 2**20 """Size of soft body contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" gpu_max_particle_contacts: int = 2**20 """Size of particle contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" @configclass class SimulationCfg: """Configuration for simulation physics.""" physics_prim_path: str = "/physicsScene" """The prim path where the USD PhysicsScene is created. Default is "/physicsScene".""" dt: float = 1.0 / 60.0 """The physics simulation time-step (in seconds). Default is 0.0167 seconds.""" substeps: int = 1 """The number of physics simulation steps per rendering step. Default is 1.""" gravity: tuple[float, float, float] = (0.0, 0.0, -9.81) """The gravity vector (in m/s^2). Default is (0.0, 0.0, -9.81). If set to (0.0, 0.0, 0.0), gravity is disabled. """ enable_scene_query_support: bool = False """Enable/disable scene query support for collision shapes. Default is False. This flag allows performing collision queries (raycasts, sweeps, and overlaps) on actors and attached shapes in the scene. 
    This is useful for implementing custom collision detection logic outside of the physics engine.
    If set to False, the physics engine does not create the scene query manager and the scene query
    functionality will not be available. However, this provides some performance speed-up.

    Note:
        This flag is overridden to True inside the :class:`SimulationContext` class when running the
        simulation with the GUI enabled. This is to allow certain GUI features to work properly.
    """

    use_fabric: bool = True
    """Enable/disable reading of physics buffers directly. Default is True.

    When running the simulation, updates to the states in the scene are normally synchronized with USD.
    This leads to an overhead in reading the data and does not scale well with massive parallelization.
    This flag allows disabling the synchronization and reading the data directly from the physics buffers.

    It is recommended to set this flag to :obj:`True` when running the simulation with a large number
    of primitives in the scene.

    Note:
        When enabled, the GUI will not update the physics parameters in real-time. To enable real-time
        updates, please set this flag to :obj:`False`.
    """

    disable_contact_processing: bool = False
    """Enable/disable contact processing. Default is False.

    By default, the physics engine processes all the contacts in the scene. However, reporting this
    contact information can be expensive due to its combinatorial complexity. This flag allows
    disabling the contact processing and querying the contacts manually by the user over a limited
    set of primitives in the scene.

    .. note::

        It is required to set this flag to :obj:`True` when using the TensorAPIs for contact reporting.
    """

    use_gpu_pipeline: bool = True
    """Enable/disable GPU pipeline. Default is True.

    If set to False, the physics data will be read as CPU buffers.
    """

    device: str = "cuda:0"
    """The device for running the simulation/environment. Default is ``"cuda:0"``."""

    physx: PhysxCfg = PhysxCfg()
    """PhysX solver settings. Default is PhysxCfg()."""

    physics_material: RigidBodyMaterialCfg = RigidBodyMaterialCfg()
    """Default physics material settings for rigid bodies. Default is RigidBodyMaterialCfg().

    The physics engine defaults to this physics material for all the rigid body prims that do not have
    any physics material specified on them.

    The material is created at the path: ``{physics_prim_path}/defaultMaterial``.
    """
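
# Example (illustrative sketch, not part of the original module): constructing a simulation
# configuration that overrides a few PhysX solver parameters while keeping the remaining defaults.
# Field names follow the config classes defined above; the chosen values are only assumptions
# for demonstration.
if __name__ == "__main__":
    # run the physics at 120 Hz on the CPU with continuous collision detection enabled
    example_sim_cfg = SimulationCfg(
        dt=1.0 / 120.0,
        device="cpu",
        use_gpu_pipeline=False,
        physx=PhysxCfg(use_gpu=False, enable_ccd=True),
    )
    # configclass instances can be converted to plain dictionaries (as done by SimulationContext)
    print(example_sim_cfg.to_dict())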
10,098
Python
40.389344
116
0.710537
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/simulation_context.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import builtins import enum import numpy as np import sys import traceback import weakref from collections.abc import Iterator from contextlib import contextmanager from typing import Any import carb import omni.isaac.core.utils.stage as stage_utils import omni.physx from omni.isaac.core.simulation_context import SimulationContext as _SimulationContext from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.version import get_version from pxr import Gf, Usd from .simulation_cfg import SimulationCfg from .spawners import DomeLightCfg, GroundPlaneCfg from .utils import bind_physics_material class SimulationContext(_SimulationContext): """A class to control simulation-related events such as physics stepping and rendering. The simulation context helps control various simulation aspects. This includes: * configure the simulator with different settings such as the physics time-step, the number of physics substeps, and the physics solver parameters (for more information, see :class:`omni.isaac.orbit.sim.SimulationCfg`) * playing, pausing, stepping and stopping the simulation * adding and removing callbacks to different simulation events such as physics stepping, rendering, etc. This class inherits from the `omni.isaac.core.simulation_context.SimulationContext`_ class and adds additional functionalities such as setting up the simulation context with a configuration object, exposing other commonly used simulator-related functions, and performing version checks of Isaac Sim to ensure compatibility between releases. The simulation context is a singleton object. This means that there can only be one instance of the simulation context at any given time. This is enforced by the parent class. Therefore, it is not possible to create multiple instances of the simulation context. Instead, the simulation context can be accessed using the ``instance()`` method. .. attention:: Since we only support the ``torch <https://pytorch.org/>``_ backend for simulation, the simulation context is configured to use the ``torch`` backend by default. This means that all the data structures used in the simulation are ``torch.Tensor`` objects. The simulation context can be used in two different modes of operations: 1. **Standalone python script**: In this mode, the user has full control over the simulation and can trigger stepping events synchronously (i.e. as a blocking call). In this case the user has to manually call :meth:`step` step the physics simulation and :meth:`render` to render the scene. 2. **Omniverse extension**: In this mode, the user has limited control over the simulation stepping and all the simulation events are triggered asynchronously (i.e. as a non-blocking call). In this case, the user can only trigger the simulation to start, pause, and stop. The simulation takes care of stepping the physics simulation and rendering the scene. Based on above, for most functions in this class there is an equivalent function that is suffixed with ``_async``. The ``_async`` functions are used in the Omniverse extension mode and the non-``_async`` functions are used in the standalone python script mode. .. 
_omni.isaac.core.simulation_context.SimulationContext: https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html#module-omni.isaac.core.simulation_context """ class RenderMode(enum.IntEnum): """Different rendering modes for the simulation. Render modes correspond to how the viewport and other UI elements (such as listeners to keyboard or mouse events) are updated. There are three main components that can be updated when the simulation is rendered: 1. **UI elements and other extensions**: These are UI elements (such as buttons, sliders, etc.) and other extensions that are running in the background that need to be updated when the simulation is running. 2. **Cameras**: These are typically based on Hydra textures and are used to render the scene from different viewpoints. They can be attached to a viewport or be used independently to render the scene. 3. **Viewports**: These are windows where you can see the rendered scene. Updating each of the above components has a different overhead. For example, updating the viewports is computationally expensive compared to updating the UI elements. Therefore, it is useful to be able to control what is updated when the simulation is rendered. This is where the render mode comes in. There are four different render modes: * :attr:`NO_GUI_OR_RENDERING`: The simulation is running without a GUI and off-screen rendering flag is disabled, so none of the above are updated. * :attr:`NO_RENDERING`: No rendering, where only 1 is updated at a lower rate. * :attr:`PARTIAL_RENDERING`: Partial rendering, where only 1 and 2 are updated. * :attr:`FULL_RENDERING`: Full rendering, where everything (1, 2, 3) is updated. .. _Viewports: https://docs.omniverse.nvidia.com/extensions/latest/ext_viewport.html """ NO_GUI_OR_RENDERING = -1 """The simulation is running without a GUI and off-screen rendering is disabled.""" NO_RENDERING = 0 """No rendering, where only other UI elements are updated at a lower rate.""" PARTIAL_RENDERING = 1 """Partial rendering, where the simulation cameras and UI elements are updated.""" FULL_RENDERING = 2 """Full rendering, where all the simulation viewports, cameras and UI elements are updated.""" def __init__(self, cfg: SimulationCfg | None = None): """Creates a simulation context to control the simulator. Args: cfg: The configuration of the simulation. Defaults to None, in which case the default configuration is used. """ # store input if cfg is None: cfg = SimulationCfg() self.cfg = cfg # check that simulation is running if stage_utils.get_current_stage() is None: raise RuntimeError("The stage has not been created. 
Did you run the simulator?") # set flags for simulator # acquire settings interface carb_settings_iface = carb.settings.get_settings() # enable hydra scene-graph instancing # note: this allows rendering of instanceable assets on the GUI carb_settings_iface.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) # change dispatcher to use the default dispatcher in PhysX SDK instead of carb tasking # note: dispatcher handles how threads are launched for multi-threaded physics carb_settings_iface.set_bool("/physics/physxDispatcher", True) # disable contact processing in omni.physx if requested # note: helpful when creating contact reporting over limited number of objects in the scene if self.cfg.disable_contact_processing: carb_settings_iface.set_bool("/physics/disableContactProcessing", True) # enable custom geometry for cylinder and cone collision shapes to allow contact reporting for them # reason: cylinders and cones aren't natively supported by PhysX so we need to use custom geometry flags # reference: https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/Geometry.html?highlight=capsule#geometry carb_settings_iface.set_bool("/physics/collisionConeCustomGeometry", False) carb_settings_iface.set_bool("/physics/collisionCylinderCustomGeometry", False) # note: we read this once since it is not expected to change during runtime # read flag for whether a local GUI is enabled self._local_gui = carb_settings_iface.get("/app/window/enabled") # read flag for whether livestreaming GUI is enabled self._livestream_gui = carb_settings_iface.get("/app/livestream/enabled") # read flag for whether the orbit viewport capture pipeline will be used, # casting None to False if the flag doesn't exist # this flag is set from the AppLauncher class self._offscreen_render = bool(carb_settings_iface.get("/orbit/offscreen_render/enabled")) # flag for whether any GUI will be rendered (local, livestreamed or viewport) self._has_gui = self._local_gui or self._livestream_gui # store the default render mode if not self._has_gui and not self._offscreen_render: # set default render mode # note: this is the terminal state: cannot exit from this render mode self.render_mode = self.RenderMode.NO_GUI_OR_RENDERING # set viewport context to None self._viewport_context = None self._viewport_window = None elif not self._has_gui and self._offscreen_render: # set default render mode # note: this is the terminal state: cannot exit from this render mode self.render_mode = self.RenderMode.PARTIAL_RENDERING # set viewport context to None self._viewport_context = None self._viewport_window = None else: # note: need to import here in case the UI is not available (ex. headless mode) import omni.ui as ui from omni.kit.viewport.utility import get_active_viewport # set default render mode # note: this can be changed by calling the `set_render_mode` function self.render_mode = self.RenderMode.FULL_RENDERING # acquire viewport context self._viewport_context = get_active_viewport() self._viewport_context.updates_enabled = True # pyright: ignore [reportOptionalMemberAccess] # acquire viewport window # TODO @mayank: Why not just use get_active_viewport_and_window() directly? 
self._viewport_window = ui.Workspace.get_window("Viewport") # counter for periodic rendering self._render_throttle_counter = 0 # rendering frequency in terms of number of render calls self._render_throttle_period = 5 # override enable scene querying if rendering is enabled # this is needed for some GUI features if self._has_gui: self.cfg.enable_scene_query_support = True # set up flatcache/fabric interface (default is None) # this is needed to flush the flatcache data into Hydra manually when calling `render()` # ref: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html # note: need to do this here because super().__init__ calls render and this variable is needed self._fabric_iface = None # read isaac sim version (this includes build tag, release tag etc.) # note: we do it once here because it reads the VERSION file from disk and is not expected to change. self._isaacsim_version = get_version() # add callback to deal the simulation app when simulation is stopped. # this is needed because physics views go invalid once we stop the simulation if not builtins.ISAAC_LAUNCHED_FROM_TERMINAL: timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() self._app_control_on_stop_handle = timeline_event_stream.create_subscription_to_pop_by_type( int(omni.timeline.TimelineEventType.STOP), lambda *args, obj=weakref.proxy(self): obj._app_control_on_stop_callback(*args), order=15, ) else: self._app_control_on_stop_handle = None # flatten out the simulation dictionary sim_params = self.cfg.to_dict() if sim_params is not None: if "physx" in sim_params: physx_params = sim_params.pop("physx") sim_params.update(physx_params) # create a simulation context to control the simulator super().__init__( stage_units_in_meters=1.0, physics_dt=self.cfg.dt, rendering_dt=self.cfg.dt * self.cfg.substeps, backend="torch", sim_params=sim_params, physics_prim_path=self.cfg.physics_prim_path, device=self.cfg.device, ) """ Operations - New. """ def has_gui(self) -> bool: """Returns whether the simulation has a GUI enabled. True if the simulation has a GUI enabled either locally or live-streamed. """ return self._has_gui def is_fabric_enabled(self) -> bool: """Returns whether the fabric interface is enabled. When fabric interface is enabled, USD read/write operations are disabled. Instead all applications read and write the simulation state directly from the fabric interface. This reduces a lot of overhead that occurs during USD read/write operations. For more information, please check `Fabric documentation`_. .. _Fabric documentation: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usd_fabric_usdrt.html """ return self._fabric_iface is not None def get_version(self) -> tuple[int, int, int]: """Returns the version of the simulator. This is a wrapper around the ``omni.isaac.version.get_version()`` function. The returned tuple contains the following information: * Major version (int): This is the year of the release (e.g. 2022). * Minor version (int): This is the half-year of the release (e.g. 1 or 2). * Patch version (int): This is the patch number of the release (e.g. 0). """ return int(self._isaacsim_version[2]), int(self._isaacsim_version[3]), int(self._isaacsim_version[4]) """ Operations - New utilities. """ @staticmethod def set_camera_view( eye: tuple[float, float, float], target: tuple[float, float, float], camera_prim_path: str = "/OmniverseKit_Persp", ): """Set the location and target of the viewport camera in the stage. 
        Note:
            This is a wrapper around the :meth:`omni.isaac.core.utils.viewports.set_camera_view` function.
            It is provided here for convenience to reduce the amount of imports needed.

        Args:
            eye: The location of the camera eye.
            target: The location of the camera target.
            camera_prim_path: The path to the camera primitive in the stage. Defaults to
                "/OmniverseKit_Persp".
        """
        set_camera_view(eye, target, camera_prim_path)

    def set_render_mode(self, mode: RenderMode):
        """Change the current render mode of the simulation.

        Please see :class:`RenderMode` for more information on the different render modes.

        .. note::

            When no GUI is available (locally or livestreamed), we do not need to choose whether the viewport
            needs to render or not (since there is no GUI). Thus, in this case, calling the function will not
            change the render mode.

        Args:
            mode (RenderMode): The rendering mode. If different from SimulationContext's rendering mode,
                SimulationContext's mode is changed to the new mode.

        Raises:
            ValueError: If the input mode is not supported.
        """
        # check if mode change is possible -- not possible when no GUI is available
        if not self._has_gui:
            carb.log_warn(
                f"Cannot change render mode when GUI is disabled. Using the default render mode: {self.render_mode}."
            )
            return
        # check if there is a mode change
        # note: this is mostly needed for GUI when we want to switch between full rendering and no rendering.
        if mode != self.render_mode:
            if mode == self.RenderMode.FULL_RENDERING:
                # display the viewport and enable updates
                self._viewport_context.updates_enabled = True  # pyright: ignore [reportOptionalMemberAccess]
                self._viewport_window.visible = True  # pyright: ignore [reportOptionalMemberAccess]
            elif mode == self.RenderMode.PARTIAL_RENDERING:
                # hide the viewport and disable updates
                self._viewport_context.updates_enabled = False  # pyright: ignore [reportOptionalMemberAccess]
                self._viewport_window.visible = False  # pyright: ignore [reportOptionalMemberAccess]
            elif mode == self.RenderMode.NO_RENDERING:
                # hide the viewport and disable updates
                if self._viewport_context is not None:
                    self._viewport_context.updates_enabled = False  # pyright: ignore [reportOptionalMemberAccess]
                    self._viewport_window.visible = False  # pyright: ignore [reportOptionalMemberAccess]
                # reset the throttle counter
                self._render_throttle_counter = 0
            else:
                raise ValueError(f"Unsupported render mode: {mode}! Please check `RenderMode` for details.")
            # update render mode
            self.render_mode = mode

    def set_setting(self, name: str, value: Any):
        """Set simulation settings using the Carbonite SDK.

        .. note::

            If the input setting name does not exist, it will be created. If it does exist, the value will be
            overwritten. Please make sure to use the correct setting name.

            To understand the settings interface, please refer to the `Carbonite SDK
            <https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/settings.html>`_
            documentation.

        Args:
            name: The name of the setting.
            value: The value of the setting.
        """
        self._settings.set(name, value)

    def get_setting(self, name: str) -> Any:
        """Read the simulation setting using the Carbonite SDK.

        Args:
            name: The name of the setting.

        Returns:
            The value of the setting.
""" return self._settings.get(name) """ Operations - Override (standalone) """ def reset(self, soft: bool = False): super().reset(soft=soft) # perform additional rendering steps to warm up replicator buffers # this is only needed for the first time we set the simulation if not soft: for _ in range(2): self.render() def step(self, render: bool = True): """Steps the physics simulation with the pre-defined time-step. .. note:: This function blocks if the timeline is paused. It only returns when the timeline is playing. Args: render: Whether to render the scene after stepping the physics simulation. If set to False, the scene is not rendered and only the physics simulation is stepped. """ # check if the simulation timeline is paused. in that case keep stepping until it is playing if not self.is_playing(): # step the simulator (but not the physics) to have UI still active while not self.is_playing(): self.render() # meantime if someone stops, break out of the loop if self.is_stopped(): break # need to do one step to refresh the app # reason: physics has to parse the scene again and inform other extensions like hydra-delegate. # without this the app becomes unresponsive. # FIXME: This steps physics as well, which we is not good in general. self.app.update() # step the simulation super().step(render=render) def render(self, mode: RenderMode | None = None): """Refreshes the rendering components including UI elements and view-ports depending on the render mode. This function is used to refresh the rendering components of the simulation. This includes updating the view-ports, UI elements, and other extensions (besides physics simulation) that are running in the background. The rendering components are refreshed based on the render mode. Please see :class:`RenderMode` for more information on the different render modes. Args: mode: The rendering mode. Defaults to None, in which case the current rendering mode is used. """ # check if we need to change the render mode if mode is not None: self.set_render_mode(mode) # render based on the render mode if self.render_mode == self.RenderMode.NO_GUI_OR_RENDERING: # we never want to render anything here (this is for complete headless mode) pass elif self.render_mode == self.RenderMode.NO_RENDERING: # throttle the rendering frequency to keep the UI responsive self._render_throttle_counter += 1 if self._render_throttle_counter % self._render_throttle_period == 0: self._render_throttle_counter = 0 # here we don't render viewport so don't need to flush fabric data # note: we don't call super().render() anymore because they do flush the fabric data self.set_setting("/app/player/playSimulations", False) self._app.update() self.set_setting("/app/player/playSimulations", True) else: # manually flush the fabric data to update Hydra textures if self._fabric_iface is not None: self._fabric_iface.update(0.0, 0.0) # render the simulation # note: we don't call super().render() anymore because they do above operation inside # and we don't want to do it twice. We may remove it once we drop support for Isaac Sim 2022.2. self.set_setting("/app/player/playSimulations", False) self._app.update() self.set_setting("/app/player/playSimulations", True) """ Operations - Override (extension) """ async def reset_async(self, soft: bool = False): # need to load all "physics" information from the USD file if not soft: omni.physx.acquire_physx_interface().force_load_physics_from_usd() # play the simulation await super().reset_async(soft=soft) """ Initialization/Destruction - Override. 
""" def _init_stage(self, *args, **kwargs) -> Usd.Stage: _ = super()._init_stage(*args, **kwargs) # set additional physx parameters and bind material self._set_additional_physx_params() # load flatcache/fabric interface self._load_fabric_interface() # return the stage return self.stage async def _initialize_stage_async(self, *args, **kwargs) -> Usd.Stage: await super()._initialize_stage_async(*args, **kwargs) # set additional physx parameters and bind material self._set_additional_physx_params() # load flatcache/fabric interface self._load_fabric_interface() # return the stage return self.stage @classmethod def clear_instance(cls): # clear the callback if cls._instance is not None: if cls._instance._app_control_on_stop_handle is not None: cls._instance._app_control_on_stop_handle.unsubscribe() cls._instance._app_control_on_stop_handle = None # call parent to clear the instance super().clear_instance() """ Helper Functions """ def _set_additional_physx_params(self): """Sets additional PhysX parameters that are not directly supported by the parent class.""" # obtain the physics scene api physics_scene = self._physics_context._physics_scene # pyright: ignore [reportPrivateUsage] physx_scene_api = self._physics_context._physx_scene_api # pyright: ignore [reportPrivateUsage] # assert that scene api is not None if physx_scene_api is None: raise RuntimeError("Physics scene API is None! Please create the scene first.") # set parameters not directly supported by the constructor # -- Continuous Collision Detection (CCD) # ref: https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/AdvancedCollisionDetection.html?highlight=ccd#continuous-collision-detection self._physics_context.enable_ccd(self.cfg.physx.enable_ccd) # -- GPU collision stack size physx_scene_api.CreateGpuCollisionStackSizeAttr(self.cfg.physx.gpu_collision_stack_size) # -- Improved determinism by PhysX physx_scene_api.CreateEnableEnhancedDeterminismAttr(self.cfg.physx.enable_enhanced_determinism) # -- Gravity # note: Isaac sim only takes the "up-axis" as the gravity direction. But physics allows any direction so we # need to convert the gravity vector to a direction and magnitude pair explicitly. 
gravity = np.asarray(self.cfg.gravity) gravity_magnitude = np.linalg.norm(gravity) # Avoid division by zero if gravity_magnitude != 0.0: gravity_direction = gravity / gravity_magnitude else: gravity_direction = gravity physics_scene.CreateGravityDirectionAttr(Gf.Vec3f(*gravity_direction)) physics_scene.CreateGravityMagnitudeAttr(gravity_magnitude) # position iteration count physx_scene_api.CreateMinPositionIterationCountAttr(self.cfg.physx.min_position_iteration_count) physx_scene_api.CreateMaxPositionIterationCountAttr(self.cfg.physx.max_position_iteration_count) # velocity iteration count physx_scene_api.CreateMinVelocityIterationCountAttr(self.cfg.physx.min_velocity_iteration_count) physx_scene_api.CreateMaxVelocityIterationCountAttr(self.cfg.physx.max_velocity_iteration_count) # create the default physics material # this material is used when no material is specified for a primitive # check: https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html#physics-materials material_path = f"{self.cfg.physics_prim_path}/defaultMaterial" self.cfg.physics_material.func(material_path, self.cfg.physics_material) # bind the physics material to the scene bind_physics_material(self.cfg.physics_prim_path, material_path) def _load_fabric_interface(self): """Loads the fabric interface if enabled.""" if self.cfg.use_fabric: from omni.physxfabric import get_physx_fabric_interface # acquire fabric interface self._fabric_iface = get_physx_fabric_interface() """ Callbacks. """ def _app_control_on_stop_callback(self, event: carb.events.IEvent): """Callback to deal with the app when the simulation is stopped. Once the simulation is stopped, the physics handles go invalid. After that, it is not possible to resume the simulation from the last state. This leaves the app in an inconsistent state, where two possible actions can be taken: 1. **Keep the app rendering**: In this case, the simulation is kept running and the app is not shutdown. However, the physics is not updated and the script cannot be resumed from the last state. The user has to manually close the app to stop the simulation. 2. **Shutdown the app**: This is the default behavior. In this case, the app is shutdown and the simulation is stopped. Note: This callback is used only when running the simulation in a standalone python script. In an extension, it is expected that the user handles the extension shutdown. """ # check if the simulation is stopped if event.type == int(omni.timeline.TimelineEventType.STOP): # keep running the simulator when configured to not shutdown the app if self._has_gui and sys.exc_info()[0] is None: self.app.print_and_log( "Simulation is stopped. The app will keep running with physics disabled." " Press Ctrl+C or close the window to exit the app." 
) while self.app.is_running(): self.render() # make sure that any replicator workflows finish rendering/writing if not builtins.ISAAC_LAUNCHED_FROM_TERMINAL: try: import omni.replicator.core as rep rep_status = rep.orchestrator.get_status() if rep_status not in [rep.orchestrator.Status.STOPPED, rep.orchestrator.Status.STOPPING]: rep.orchestrator.stop() if rep_status != rep.orchestrator.Status.STOPPED: rep.orchestrator.wait_until_complete() except Exception: pass # clear the instance and all callbacks # note: clearing callbacks is important to prevent memory leaks self.clear_all_callbacks() # workaround for exit issues, clean the stage first: if omni.usd.get_context().can_close_stage(): omni.usd.get_context().close_stage() # print logging information self.app.print_and_log("Simulation is stopped. Shutting down the app.") # shutdown the simulator self.app.shutdown() # disabled on linux to avoid a crash carb.get_framework().unload_all_plugins() @contextmanager def build_simulation_context( create_new_stage: bool = True, gravity_enabled: bool = True, device: str = "cuda:0", dt: float = 0.01, sim_cfg: SimulationCfg | None = None, add_ground_plane: bool = False, add_lighting: bool = False, auto_add_lighting: bool = False, ) -> Iterator[SimulationContext]: """Context manager to build a simulation context with the provided settings. This function facilitates the creation of a simulation context and provides flexibility in configuring various aspects of the simulation, such as time step, gravity, device, and scene elements like ground plane and lighting. If :attr:`sim_cfg` is None, then an instance of :class:`SimulationCfg` is created with default settings, with parameters overwritten based on arguments to the function. An example usage of the context manager function: .. code-block:: python with build_simulation_context() as sim: # Design the scene # Play the simulation sim.reset() while sim.is_playing(): sim.step() Args: create_new_stage: Whether to create a new stage. Defaults to True. gravity_enabled: Whether to enable gravity in the simulation. Defaults to True. device: Device to run the simulation on. Defaults to "cuda:0". dt: Time step for the simulation: Defaults to 0.01. sim_cfg: :class:`omni.isaac.orbit.sim.SimulationCfg` to use for the simulation. Defaults to None. add_ground_plane: Whether to add a ground plane to the simulation. Defaults to False. add_lighting: Whether to add a dome light to the simulation. Defaults to False. auto_add_lighting: Whether to automatically add a dome light to the simulation if the simulation has a GUI. Defaults to False. This is useful for debugging tests in the GUI. Yields: The simulation context to use for the simulation. 
""" try: if create_new_stage: stage_utils.create_new_stage() if sim_cfg is None: # Construct one and overwrite the dt, gravity, and device sim_cfg = SimulationCfg(dt=dt) # Set up gravity if gravity_enabled: sim_cfg.gravity = (0.0, 0.0, -9.81) else: sim_cfg.gravity = (0.0, 0.0, 0.0) # Set device sim_cfg.device = device # Construct simulation context sim = SimulationContext(sim_cfg) if add_ground_plane: # Ground-plane cfg = GroundPlaneCfg() cfg.func("/World/defaultGroundPlane", cfg) if add_lighting or (auto_add_lighting and sim.has_gui()): # Lighting cfg = DomeLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=10000, ) # Dome light named specifically to avoid conflicts cfg.func(prim_path="/World/defaultDomeLight", cfg=cfg, translation=(0.0, 0.0, 10.0)) yield sim except Exception: carb.log_error(traceback.format_exc()) raise finally: if not sim.has_gui(): # Stop simulation only if we aren't rendering otherwise the app will hang indefinitely sim.stop() # Clear the stage sim.clear_all_callbacks() sim.clear_instance()
33,577
Python
47.106017
199
0.653751
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/schemas/schemas_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from typing import Literal

from omni.isaac.orbit.utils import configclass


@configclass
class ArticulationRootPropertiesCfg:
    """Properties to apply to the root of an articulation.

    See :meth:`modify_articulation_root_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    articulation_enabled: bool | None = None
    """Whether to enable or disable articulation."""

    enabled_self_collisions: bool | None = None
    """Whether to enable or disable self-collisions."""

    solver_position_iteration_count: int | None = None
    """Solver position iteration counts for the body."""

    solver_velocity_iteration_count: int | None = None
    """Solver velocity iteration counts for the body."""

    sleep_threshold: float | None = None
    """Mass-normalized kinetic energy threshold below which an actor may go to sleep."""

    stabilization_threshold: float | None = None
    """The mass-normalized kinetic energy threshold below which an articulation may participate in stabilization."""


@configclass
class RigidBodyPropertiesCfg:
    """Properties to apply to a rigid body.

    See :meth:`modify_rigid_body_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    rigid_body_enabled: bool | None = None
    """Whether to enable or disable the rigid body."""

    kinematic_enabled: bool | None = None
    """Determines whether the body is kinematic or not.

    A kinematic body is a body that is moved through animated poses or through user defined poses. The
    simulation still derives velocities for the kinematic body based on the external motion.

    For more information on kinematic bodies, please refer to the `documentation
    <https://openusd.org/release/wp_rigid_body_physics.html#kinematic-bodies>`_.
    """

    disable_gravity: bool | None = None
    """Disable gravity for the actor."""

    linear_damping: float | None = None
    """Linear damping for the body."""

    angular_damping: float | None = None
    """Angular damping for the body."""

    max_linear_velocity: float | None = None
    """Maximum linear velocity for rigid bodies (in m/s)."""

    max_angular_velocity: float | None = None
    """Maximum angular velocity for rigid bodies (in rad/s)."""

    max_depenetration_velocity: float | None = None
    """Maximum depenetration velocity permitted to be introduced by the solver (in m/s)."""

    max_contact_impulse: float | None = None
    """The limit on the impulse that may be applied at a contact."""

    enable_gyroscopic_forces: bool | None = None
    """Enables computation of gyroscopic forces on the rigid body."""

    retain_accelerations: bool | None = None
    """Carries over forces/accelerations over sub-steps."""

    solver_position_iteration_count: int | None = None
    """Solver position iteration counts for the body."""

    solver_velocity_iteration_count: int | None = None
    """Solver velocity iteration counts for the body."""

    sleep_threshold: float | None = None
    """Mass-normalized kinetic energy threshold below which an actor may go to sleep."""

    stabilization_threshold: float | None = None
    """The mass-normalized kinetic energy threshold below which an actor may participate in stabilization."""


@configclass
class CollisionPropertiesCfg:
    """Properties to apply to colliders in a rigid body.
    See :meth:`modify_collision_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    collision_enabled: bool | None = None
    """Whether to enable or disable collisions."""

    contact_offset: float | None = None
    """Contact offset for the collision shape (in m).

    The collision detector generates contact points as soon as two shapes get closer than the sum of their
    contact offsets. This quantity should be non-negative, which means that contact generation can
    potentially start before the shapes actually penetrate.
    """

    rest_offset: float | None = None
    """Rest offset for the collision shape (in m).

    The rest offset quantifies how close a shape gets to others at rest. At rest, the distance between two
    vertically stacked objects is the sum of their rest offsets. If a pair of shapes have a positive rest
    offset, the shapes will be separated at rest by an air gap.
    """

    torsional_patch_radius: float | None = None
    """Radius of the contact patch for applying torsional friction (in m).

    It is used to approximate rotational friction introduced by the compression of contacting surfaces.
    If the radius is zero, no torsional friction is applied.
    """

    min_torsional_patch_radius: float | None = None
    """Minimum radius of the contact patch for applying torsional friction (in m)."""


@configclass
class MassPropertiesCfg:
    """Properties to define explicit mass properties of a rigid body.

    See :meth:`modify_mass_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    mass: float | None = None
    """The mass of the rigid body (in kg).

    Note:
        If None, the mass is ignored and the density is used to compute the mass.
    """

    density: float | None = None
    """The density of the rigid body (in kg/m^3).

    The density indirectly defines the mass of the rigid body. It is generally computed using the collision
    approximation of the body.
    """


@configclass
class JointDrivePropertiesCfg:
    """Properties to define the drive mechanism of a joint.

    See :meth:`modify_joint_drive_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    drive_type: Literal["force", "acceleration"] | None = None
    """Joint drive type to apply.

    If the drive type is "force", then the joint is driven by a force. If the drive type is "acceleration",
    then the joint is driven by an acceleration (usually used for kinematic joints).
    """


@configclass
class FixedTendonPropertiesCfg:
    """Properties to define fixed tendons of an articulation.

    See :meth:`modify_fixed_tendon_properties` for more information.

    .. note::
        If the values are None, they are not modified. This is useful when you want to set only a subset of
        the properties and leave the rest as-is.
    """

    tendon_enabled: bool | None = None
    """Whether to enable or disable the tendon."""

    stiffness: float | None = None
    """Spring stiffness term acting on the tendon's length."""

    damping: float | None = None
    """The damping term acting on both the tendon length and the tendon-length limits."""

    limit_stiffness: float | None = None
    """Limit stiffness term acting on the tendon's length limits."""

    offset: float | None = None
    """Length offset term for the tendon.

    It defines an amount to be added to the accumulated length computed for the tendon.
This allows the application to actuate the tendon by shortening or lengthening it. """ rest_length: float | None = None """Spring rest length of the tendon."""
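
# Example (illustrative sketch, not part of the original module): since every field defaults to
# None ("leave the USD attribute unmodified"), a configuration only needs to list the properties
# that should actually be written to the prim. The values below are illustrative assumptions.
_example_rigid_body_props = RigidBodyPropertiesCfg(
    disable_gravity=False,
    max_depenetration_velocity=5.0,
    solver_position_iteration_count=8,
    solver_velocity_iteration_count=1,
)
_example_mass_props = MassPropertiesCfg(density=1000.0)  # mass is derived from the density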
7,830
Python
39.158974
157
0.698595
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/schemas/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing utilities for schemas used in Omniverse. We wrap the USD schemas for PhysX and USD Physics in a more convenient API for setting the parameters from Python. This is done so that configuration objects can define the schema properties to set and make it easier to tune the physics parameters without requiring to open Omniverse Kit and manually set the parameters into the respective USD attributes. .. caution:: Schema properties cannot be applied on prims that are prototypes as they are read-only prims. This particularly affects instanced assets where some of the prims (usually the visual and collision meshes) are prototypes so that the instancing can be done efficiently. In such cases, it is assumed that the prototypes have sim-ready properties on them that don't need to be modified. Trying to set properties into prototypes will throw a warning saying that the prim is a prototype and the properties cannot be set. The schemas are defined in the following links: * `UsdPhysics schema <https://openusd.org/dev/api/usd_physics_page_front.html>`_ * `PhysxSchema schema <https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/index.html>`_ Locally, the schemas are defined in the following files: * ``_isaac_sim/kit/extsPhysics/omni.usd.schema.physics/plugins/UsdPhysics/resources/UsdPhysics/schema.usda`` * ``_isaac_sim/kit/extsPhysics/omni.usd.schema.physx/plugins/PhysxSchema/resources/PhysxSchema/schema.usda`` """ from .schemas import ( activate_contact_sensors, define_articulation_root_properties, define_collision_properties, define_mass_properties, define_rigid_body_properties, modify_articulation_root_properties, modify_collision_properties, modify_fixed_tendon_properties, modify_joint_drive_properties, modify_mass_properties, modify_rigid_body_properties, ) from .schemas_cfg import ( ArticulationRootPropertiesCfg, CollisionPropertiesCfg, FixedTendonPropertiesCfg, JointDrivePropertiesCfg, MassPropertiesCfg, RigidBodyPropertiesCfg, )
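
# Example (illustrative sketch, not part of the original module): applying the wrapped schemas to a
# prim using the functions re-exported above. The prim path is hypothetical and a USD stage must
# already exist (e.g. inside a running simulation) for these calls to succeed.
def _example_setup_rigid_body(prim_path: str = "/World/Object"):
    # apply rigid-body, collision and mass schemas with a small subset of properties
    define_rigid_body_properties(prim_path, RigidBodyPropertiesCfg(disable_gravity=False))
    define_collision_properties(prim_path, CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0))
    define_mass_properties(prim_path, MassPropertiesCfg(density=500.0))
    # enable contact reporting on all rigid bodies under the prim
    activate_contact_sensors(prim_path, threshold=1.0)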
2,223
Python
38.714285
118
0.774629
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/schemas/schemas.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import carb import omni.isaac.core.utils.stage as stage_utils from pxr import PhysxSchema, Usd, UsdPhysics from ..utils import apply_nested, safe_set_attribute_on_usd_schema from . import schemas_cfg """ Articulation root properties. """ def define_articulation_root_properties( prim_path: str, cfg: schemas_cfg.ArticulationRootPropertiesCfg, stage: Usd.Stage | None = None ): """Apply the articulation root schema on the input prim and set its properties. See :func:`modify_articulation_root_properties` for more details on how the properties are set. Args: prim_path: The prim path where to apply the articulation root schema. cfg: The configuration for the articulation root. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Raises: ValueError: When the prim path is not valid. TypeError: When the prim already has conflicting API schemas. """ # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get articulation USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim path is valid if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") # check if prim has articulation applied on it if not UsdPhysics.ArticulationRootAPI(prim): UsdPhysics.ArticulationRootAPI.Apply(prim) # set articulation root properties modify_articulation_root_properties(prim_path, cfg, stage) @apply_nested def modify_articulation_root_properties( prim_path: str, cfg: schemas_cfg.ArticulationRootPropertiesCfg, stage: Usd.Stage | None = None ) -> bool: """Modify PhysX parameters for an articulation root prim. The `articulation root`_ marks the root of an articulation tree. For floating articulations, this should be on the root body. For fixed articulations, this API can be on a direct or indirect parent of the root joint which is fixed to the world. The schema comprises of attributes that belong to the `ArticulationRootAPI`_ and `PhysxArticulationAPI`_. schemas. The latter contains the PhysX parameters for the articulation root. The properties are applied to the articulation root prim. The common properties (such as solver position and velocity iteration counts, sleep threshold, stabilization threshold) take precedence over those specified in the rigid body schemas for all the rigid bodies in the articulation. .. note:: This function is decorated with :func:`apply_nested` that set the properties to all the prims (that have the schema applied on them) under the input prim path. .. _articulation root: https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/Articulations.html .. _ArticulationRootAPI: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html .. _PhysxArticulationAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_articulation_a_p_i.html Args: prim_path: The prim path to the articulation root. cfg: The configuration for the articulation root. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Returns: True if the properties were successfully set, False otherwise. 
""" # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get articulation USD prim articulation_prim = stage.GetPrimAtPath(prim_path) # check if prim has articulation applied on it if not UsdPhysics.ArticulationRootAPI(articulation_prim): return False # retrieve the articulation api physx_articulation_api = PhysxSchema.PhysxArticulationAPI(articulation_prim) if not physx_articulation_api: physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(articulation_prim) # convert to dict cfg = cfg.to_dict() # set into physx api for attr_name, value in cfg.items(): safe_set_attribute_on_usd_schema(physx_articulation_api, attr_name, value, camel_case=True) # success return True """ Rigid body properties. """ def define_rigid_body_properties( prim_path: str, cfg: schemas_cfg.RigidBodyPropertiesCfg, stage: Usd.Stage | None = None ): """Apply the rigid body schema on the input prim and set its properties. See :func:`modify_rigid_body_properties` for more details on how the properties are set. Args: prim_path: The prim path where to apply the rigid body schema. cfg: The configuration for the rigid body. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Raises: ValueError: When the prim path is not valid. TypeError: When the prim already has conflicting API schemas. """ # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim path is valid if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") # check if prim has rigid body applied on it if not UsdPhysics.RigidBodyAPI(prim): UsdPhysics.RigidBodyAPI.Apply(prim) # set rigid body properties modify_rigid_body_properties(prim_path, cfg, stage) @apply_nested def modify_rigid_body_properties( prim_path: str, cfg: schemas_cfg.RigidBodyPropertiesCfg, stage: Usd.Stage | None = None ) -> bool: """Modify PhysX parameters for a rigid body prim. A `rigid body`_ is a single body that can be simulated by PhysX. It can be either dynamic or kinematic. A dynamic body responds to forces and collisions. A `kinematic body`_ can be moved by the user, but does not respond to forces. They are similar to having static bodies that can be moved around. The schema comprises of attributes that belong to the `RigidBodyAPI`_ and `PhysxRigidBodyAPI`_. schemas. The latter contains the PhysX parameters for the rigid body. .. note:: This function is decorated with :func:`apply_nested` that sets the properties to all the prims (that have the schema applied on them) under the input prim path. .. _rigid body: https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/RigidBodyOverview.html .. _kinematic body: https://openusd.org/release/wp_rigid_body_physics.html#kinematic-bodies .. _RigidBodyAPI: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html .. _PhysxRigidBodyAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_rigid_body_a_p_i.html Args: prim_path: The prim path to the rigid body. cfg: The configuration for the rigid body. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Returns: True if the properties were successfully set, False otherwise. 
""" # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get rigid-body USD prim rigid_body_prim = stage.GetPrimAtPath(prim_path) # check if prim has rigid-body applied on it if not UsdPhysics.RigidBodyAPI(rigid_body_prim): return False # retrieve the USD rigid-body api usd_rigid_body_api = UsdPhysics.RigidBodyAPI(rigid_body_prim) # retrieve the physx rigid-body api physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI(rigid_body_prim) if not physx_rigid_body_api: physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI.Apply(rigid_body_prim) # convert to dict cfg = cfg.to_dict() # set into USD API for attr_name in ["rigid_body_enabled", "kinematic_enabled"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_rigid_body_api, attr_name, value, camel_case=True) # set into PhysX API for attr_name, value in cfg.items(): safe_set_attribute_on_usd_schema(physx_rigid_body_api, attr_name, value, camel_case=True) # success return True """ Collision properties. """ def define_collision_properties( prim_path: str, cfg: schemas_cfg.CollisionPropertiesCfg, stage: Usd.Stage | None = None ): """Apply the collision schema on the input prim and set its properties. See :func:`modify_collision_properties` for more details on how the properties are set. Args: prim_path: The prim path where to apply the rigid body schema. cfg: The configuration for the collider. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Raises: ValueError: When the prim path is not valid. """ # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim path is valid if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") # check if prim has collision applied on it if not UsdPhysics.CollisionAPI(prim): UsdPhysics.CollisionAPI.Apply(prim) # set collision properties modify_collision_properties(prim_path, cfg, stage) @apply_nested def modify_collision_properties( prim_path: str, cfg: schemas_cfg.CollisionPropertiesCfg, stage: Usd.Stage | None = None ) -> bool: """Modify PhysX properties of collider prim. These properties are based on the `UsdPhysics.CollisionAPI`_ and `PhysxSchema.PhysxCollisionAPI`_ schemas. For more information on the properties, please refer to the official documentation. Tuning these parameters influence the contact behavior of the rigid body. For more information on tune them and their effect on the simulation, please refer to the `PhysX documentation <https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/AdvancedCollisionDetection.html>`__. .. note:: This function is decorated with :func:`apply_nested` that sets the properties to all the prims (that have the schema applied on them) under the input prim path. .. _UsdPhysics.CollisionAPI: https://openusd.org/dev/api/class_usd_physics_collision_a_p_i.html .. _PhysxSchema.PhysxCollisionAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_collision_a_p_i.html Args: prim_path: The prim path of parent. cfg: The configuration for the collider. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Returns: True if the properties were successfully set, False otherwise. 
""" # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim collider_prim = stage.GetPrimAtPath(prim_path) # check if prim has collision applied on it if not UsdPhysics.CollisionAPI(collider_prim): return False # retrieve the USD collision api usd_collision_api = UsdPhysics.CollisionAPI(collider_prim) # retrieve the collision api physx_collision_api = PhysxSchema.PhysxCollisionAPI(collider_prim) if not physx_collision_api: physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(collider_prim) # convert to dict cfg = cfg.to_dict() # set into USD API for attr_name in ["collision_enabled"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_collision_api, attr_name, value, camel_case=True) # set into PhysX API for attr_name, value in cfg.items(): safe_set_attribute_on_usd_schema(physx_collision_api, attr_name, value, camel_case=True) # success return True """ Mass properties. """ def define_mass_properties(prim_path: str, cfg: schemas_cfg.MassPropertiesCfg, stage: Usd.Stage | None = None): """Apply the mass schema on the input prim and set its properties. See :func:`modify_mass_properties` for more details on how the properties are set. Args: prim_path: The prim path where to apply the rigid body schema. cfg: The configuration for the mass properties. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Raises: ValueError: When the prim path is not valid. """ # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim path is valid if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") # check if prim has mass applied on it if not UsdPhysics.MassAPI(prim): UsdPhysics.MassAPI.Apply(prim) # set mass properties modify_mass_properties(prim_path, cfg, stage) @apply_nested def modify_mass_properties(prim_path: str, cfg: schemas_cfg.MassPropertiesCfg, stage: Usd.Stage | None = None) -> bool: """Set properties for the mass of a rigid body prim. These properties are based on the `UsdPhysics.MassAPI` schema. If the mass is not defined, the density is used to compute the mass. However, in that case, a collision approximation of the rigid body is used to compute the density. For more information on the properties, please refer to the `documentation <https://openusd.org/release/wp_rigid_body_physics.html#body-mass-properties>`__. .. caution:: The mass of an object can be specified in multiple ways and have several conflicting settings that are resolved based on precedence. Please make sure to understand the precedence rules before using this property. .. note:: This function is decorated with :func:`apply_nested` that sets the properties to all the prims (that have the schema applied on them) under the input prim path. .. UsdPhysics.MassAPI: https://openusd.org/dev/api/class_usd_physics_mass_a_p_i.html Args: prim_path: The prim path of the rigid body. cfg: The configuration for the mass properties. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Returns: True if the properties were successfully set, False otherwise. 
""" # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim rigid_prim = stage.GetPrimAtPath(prim_path) # check if prim has mass API applied on it if not UsdPhysics.MassAPI(rigid_prim): return False # retrieve the USD mass api usd_physics_mass_api = UsdPhysics.MassAPI(rigid_prim) # convert to dict cfg = cfg.to_dict() # set into USD API for attr_name in ["mass", "density"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_physics_mass_api, attr_name, value, camel_case=True) # success return True """ Contact sensor. """ def activate_contact_sensors(prim_path: str, threshold: float = 0.0, stage: Usd.Stage = None): """Activate the contact sensor on all rigid bodies under a specified prim path. This function adds the PhysX contact report API to all rigid bodies under the specified prim path. It also sets the force threshold beyond which the contact sensor reports the contact. The contact reporting API can only be added to rigid bodies. Args: prim_path: The prim path under which to search and prepare contact sensors. threshold: The threshold for the contact sensor. Defaults to 0.0. stage: The stage where to find the prim. Defaults to None, in which case the current stage is used. Raises: ValueError: If the input prim path is not valid. ValueError: If there are no rigid bodies under the prim path. """ # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get prim prim: Usd.Prim = stage.GetPrimAtPath(prim_path) # check if prim is valid if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") # iterate over all children num_contact_sensors = 0 all_prims = [prim] while len(all_prims) > 0: # get current prim child_prim = all_prims.pop(0) # check if prim is a rigid body # nested rigid bodies are not allowed by SDK so we can safely assume that # if a prim has a rigid body API, it is a rigid body and we don't need to # check its children if child_prim.HasAPI(UsdPhysics.RigidBodyAPI): # set sleep threshold to zero rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPrimPath()) rb.CreateSleepThresholdAttr().Set(0.0) # add contact report API with threshold of zero if not child_prim.HasAPI(PhysxSchema.PhysxContactReportAPI): carb.log_verbose(f"Adding contact report API to prim: '{child_prim.GetPrimPath()}'") cr_api = PhysxSchema.PhysxContactReportAPI.Apply(child_prim) else: carb.log_verbose(f"Contact report API already exists on prim: '{child_prim.GetPrimPath()}'") cr_api = PhysxSchema.PhysxContactReportAPI.Get(stage, child_prim.GetPrimPath()) # set threshold to zero cr_api.CreateThresholdAttr().Set(threshold) # increment number of contact sensors num_contact_sensors += 1 else: # add all children to tree all_prims += child_prim.GetChildren() # check if no contact sensors were found if num_contact_sensors == 0: raise ValueError( f"No contact sensors added to the prim: '{prim_path}'. This means that no rigid bodies" " are present under this prim. Please check the prim path." ) # success return True """ Joint drive properties. """ @apply_nested def modify_joint_drive_properties( prim_path: str, drive_props: schemas_cfg.JointDrivePropertiesCfg, stage: Usd.Stage | None = None ) -> bool: """Modify PhysX parameters for a joint prim. This function checks if the input prim is a prismatic or revolute joint and applies the joint drive schema on it. If the joint is a tendon (i.e., it has the `PhysxTendonAxisAPI`_ schema applied on it), then the joint drive schema is not applied. 
    Based on the configuration, this method modifies the properties of the joint drive. These properties are
    based on the `UsdPhysics.DriveAPI`_ schema. For more information on the properties, please refer to the
    official documentation.

    .. caution::

        We highly recommend modifying joint properties of articulations through the functionalities in the
        :mod:`omni.isaac.orbit.actuators` module. The methods here are for setting simulation low-level
        properties only.

    .. _UsdPhysics.DriveAPI: https://openusd.org/dev/api/class_usd_physics_drive_a_p_i.html
    .. _PhysxTendonAxisAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_tendon_axis_a_p_i.html

    Args:
        prim_path: The prim path where to apply the joint drive schema.
        drive_props: The configuration for the joint drive.
        stage: The stage where to find the prim. Defaults to None, in which case the
            current stage is used.

    Returns:
        True if the properties were successfully set, False otherwise.

    Raises:
        ValueError: If the input prim path is not valid.
    """
    # obtain stage
    if stage is None:
        stage = stage_utils.get_current_stage()
    # get USD prim
    prim = stage.GetPrimAtPath(prim_path)
    # check if prim path is valid
    if not prim.IsValid():
        raise ValueError(f"Prim path '{prim_path}' is not valid.")

    # check if prim is a supported joint type and resolve the drive API name
    if prim.IsA(UsdPhysics.RevoluteJoint):
        drive_api_name = "angular"
    elif prim.IsA(UsdPhysics.PrismaticJoint):
        drive_api_name = "linear"
    else:
        return False
    # check that prim is not a tendon child prim
    # note: root prim is what "controls" the tendon so we still want to apply the drive to it
    if prim.HasAPI(PhysxSchema.PhysxTendonAxisAPI) and not prim.HasAPI(PhysxSchema.PhysxTendonAxisRootAPI):
        return False

    # check if prim has joint drive applied on it
    usd_drive_api = UsdPhysics.DriveAPI(prim, drive_api_name)
    if not usd_drive_api:
        usd_drive_api = UsdPhysics.DriveAPI.Apply(prim, drive_api_name)

    # change the drive type to input
    if drive_props.drive_type is not None:
        usd_drive_api.CreateTypeAttr().Set(drive_props.drive_type)

    return True


"""
Fixed tendon properties.
"""


@apply_nested
def modify_fixed_tendon_properties(
    prim_path: str, cfg: schemas_cfg.FixedTendonPropertiesCfg, stage: Usd.Stage | None = None
) -> bool:
    """Modify PhysX parameters for a fixed tendon attachment prim.

    A `fixed tendon`_ can be used to link multiple degrees of freedom of articulation joints
    through length and limit constraints. For instance, it can be used to set up an equality constraint
    between a driven and a passive revolute joint. The schema comprises attributes that belong to the
    `PhysxTendonAxisRootAPI`_ schema.

    .. note::
        This function is decorated with :func:`apply_nested` that sets the properties to all the prims
        (that have the schema applied on them) under the input prim path.

    .. _fixed tendon: https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/_api_build/class_px_articulation_fixed_tendon.html
    .. _PhysxTendonAxisRootAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_tendon_axis_root_a_p_i.html

    Args:
        prim_path: The prim path to the tendon attachment.
        cfg: The configuration for the tendon attachment.
        stage: The stage where to find the prim. Defaults to None, in which case the
            current stage is used.

    Returns:
        True if the properties were successfully set, False otherwise.

    Raises:
        ValueError: If the input prim path is not valid.
""" # obtain stage if stage is None: stage = stage_utils.get_current_stage() # get USD prim tendon_prim = stage.GetPrimAtPath(prim_path) # check if prim has fixed tendon applied on it has_root_fixed_tendon = tendon_prim.HasAPI(PhysxSchema.PhysxTendonAxisRootAPI) if not has_root_fixed_tendon: return False # resolve all available instances of the schema since it is multi-instance for schema_name in tendon_prim.GetAppliedSchemas(): # only consider the fixed tendon schema if "PhysxTendonAxisRootAPI" not in schema_name: continue # retrieve the USD tendon api instance_name = schema_name.split(":")[-1] physx_tendon_axis_api = PhysxSchema.PhysxTendonAxisRootAPI(tendon_prim, instance_name) # convert to dict cfg = cfg.to_dict() # set into PhysX API for attr_name, value in cfg.items(): safe_set_attribute_on_usd_schema(physx_tendon_axis_api, attr_name, value, camel_case=True) # success return True
23,529
Python
39.222222
157
0.69085
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/spawner_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from collections.abc import Callable
from dataclasses import MISSING

from pxr import Usd

from omni.isaac.orbit.sim import schemas
from omni.isaac.orbit.utils import configclass


@configclass
class SpawnerCfg:
    """Configuration parameters for spawning an asset.

    Spawning an asset is done by calling the :attr:`func` function. The function takes in the
    prim path to spawn the asset at, the configuration instance and transformation, and returns the
    source prim of the spawned asset.

    The function is typically decorated with the :func:`omni.isaac.orbit.sim.utils.clone` decorator,
    which checks if the input prim path is a regex expression and spawns the asset at all matching prims.
    For this, the decorator uses the Cloner API from Isaac Sim and handles the :attr:`copy_from_source`
    parameter.
    """

    func: Callable[..., Usd.Prim] = MISSING
    """Function to use for spawning the asset.

    The function takes in the prim path (or expression) to spawn the asset at, the configuration instance
    and transformation, and returns the source prim spawned.
    """

    visible: bool = True
    """Whether the spawned asset should be visible. Defaults to True."""

    semantic_tags: list[tuple[str, str]] | None = None
    """List of semantic tags to add to the spawned asset. Defaults to None,
    which means no semantic tags will be added.

    The semantic tags follow the `Replicator Semantic` tagging system. Each tag is a tuple of the
    form ``(type, data)``, where ``type`` is the type of the tag and ``data`` is the semantic label
    associated with the tag. For example, to annotate a spawned asset in the class avocado, the semantic
    tag would be ``[("class", "avocado")]``.

    You can specify multiple semantic tags by passing in a list of tags. For example, to annotate a
    spawned asset in the class avocado and the color green, the semantic tags would be
    ``[("class", "avocado"), ("color", "green")]``.

    .. seealso::

        For more information on the semantics filter, see the documentation for the `semantics schema editor`_.

    .. _semantics schema editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering

    """

    copy_from_source: bool = True
    """Whether to copy the asset from the source prim or inherit it. Defaults to True.

    This parameter is only used when cloning prims. If False, then the asset will be inherited from
    the source prim, i.e. all USD changes to the source prim will be reflected in the cloned prims.

    .. versionadded:: 2023.1

        This parameter is only supported from Isaac Sim 2023.1 onwards. If you are using an older version
        of Isaac Sim, this parameter will be ignored.
    """


@configclass
class RigidObjectSpawnerCfg(SpawnerCfg):
    """Configuration parameters for spawning a rigid asset.

    Note:
        By default, all properties are set to None. This means that no properties will be added or modified
        on the prim outside of the properties available by default when spawning the prim.
    """

    mass_props: schemas.MassPropertiesCfg | None = None
    """Mass properties."""

    rigid_props: schemas.RigidBodyPropertiesCfg | None = None
    """Rigid body properties."""

    collision_props: schemas.CollisionPropertiesCfg | None = None
    """Properties to apply to all collision meshes."""

    activate_contact_sensors: bool = False
    """Activate contact reporting on all rigid bodies. Defaults to False.

    This adds the PhysxContactReportAPI to all the rigid bodies in the given prim path and its children.
    """
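

# The sketch below is illustrative only: it shows how the fields declared above are typically filled in
# by a concrete spawner configuration. ``UsdFileCfg`` (from :mod:`omni.isaac.orbit.sim`, as used in the
# package docstring examples) is assumed to derive from :class:`RigidObjectSpawnerCfg`; the USD path,
# prim path and numeric values are placeholders.
def _example_rigid_object_spawner_cfg():
    """Illustrative sketch (not called anywhere): spawning a rigid asset from a USD file."""
    import omni.isaac.orbit.sim as sim_utils

    cfg = sim_utils.UsdFileCfg(
        usd_path="/path/to/asset.usd",  # placeholder path
        rigid_props=schemas.RigidBodyPropertiesCfg(rigid_body_enabled=True),
        mass_props=schemas.MassPropertiesCfg(mass=1.0),
        collision_props=schemas.CollisionPropertiesCfg(collision_enabled=True),
        activate_contact_sensors=True,
    )
    # the configured function performs the actual spawning
    return cfg.func("/World/Asset", cfg)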
3,807
Python
38.666666
148
0.720252
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing utilities for creating prims in Omniverse.

Spawners are used to create prims in the Omniverse simulator. At their core, they call the
USD Python API or Omniverse Kit Commands to create prims. However, they also provide a convenient
interface for creating prims from their respective config classes.

There are two main ways of using the spawners:

1. Using the function from the module

   .. code-block:: python

      import omni.isaac.orbit.sim as sim_utils
      from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

      # spawn from USD file
      cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd")
      prim_path = "/World/myAsset"

      # spawn using the function from the module
      sim_utils.spawn_from_usd(prim_path, cfg)

2. Using the `func` reference in the config class

   .. code-block:: python

      import omni.isaac.orbit.sim as sim_utils
      from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

      # spawn from USD file
      cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd")
      prim_path = "/World/myAsset"

      # use the `func` reference in the config class
      cfg.func(prim_path, cfg)

For convenience, we recommend using the second approach, as it allows you to easily change the config
class and the function call in a single line of code.

Depending on the type of prim, the spawning functions can also deal with the creation of prims
over multiple prim paths. These need to be provided as regex prim path expressions, which are
resolved based on the parent prim paths using the :meth:`omni.isaac.orbit.sim.utils.clone` function
decorator. For example:

* ``/World/Table_[1,2]/Robot`` will create the prims ``/World/Table_1/Robot`` and ``/World/Table_2/Robot``
  only if the parent prims ``/World/Table_1`` and ``/World/Table_2`` exist.
* ``/World/Robot_[1,2]`` will **NOT** create the prims ``/World/Robot_1`` and ``/World/Robot_2`` as
  the prim path expression can be resolved to multiple prims.
"""

from .from_files import *  # noqa: F401, F403
from .lights import *  # noqa: F401, F403
from .materials import *  # noqa: F401, F403
from .sensors import *  # noqa: F401, F403
from .shapes import *  # noqa: F401, F403
from .spawner_cfg import *  # noqa: F401, F403
2,479
Python
38.365079
111
0.73215
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/sensors/sensors.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from typing import TYPE_CHECKING

import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from pxr import Sdf, Usd

from omni.isaac.orbit.sim.utils import clone
from omni.isaac.orbit.utils import to_camel_case

if TYPE_CHECKING:
    from . import sensors_cfg

CUSTOM_PINHOLE_CAMERA_ATTRIBUTES = {
    "projection_type": ("cameraProjectionType", Sdf.ValueTypeNames.Token),
}
"""Custom attributes for pinhole camera model.

The dictionary maps the attribute name in the configuration to the attribute name in the USD prim.
"""

CUSTOM_FISHEYE_CAMERA_ATTRIBUTES = {
    "projection_type": ("cameraProjectionType", Sdf.ValueTypeNames.Token),
    "fisheye_nominal_width": ("fthetaWidth", Sdf.ValueTypeNames.Float),
    "fisheye_nominal_height": ("fthetaHeight", Sdf.ValueTypeNames.Float),
    "fisheye_optical_centre_x": ("fthetaCx", Sdf.ValueTypeNames.Float),
    "fisheye_optical_centre_y": ("fthetaCy", Sdf.ValueTypeNames.Float),
    "fisheye_max_fov": ("fthetaMaxFov", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_a": ("fthetaPolyA", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_b": ("fthetaPolyB", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_c": ("fthetaPolyC", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_d": ("fthetaPolyD", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_e": ("fthetaPolyE", Sdf.ValueTypeNames.Float),
    "fisheye_polynomial_f": ("fthetaPolyF", Sdf.ValueTypeNames.Float),
}
"""Custom attributes for fisheye camera model.

The dictionary maps the attribute name in the configuration to the attribute name in the USD prim.
"""


@clone
def spawn_camera(
    prim_path: str,
    cfg: sensors_cfg.PinholeCameraCfg | sensors_cfg.FisheyeCameraCfg,
    translation: tuple[float, float, float] | None = None,
    orientation: tuple[float, float, float, float] | None = None,
) -> Usd.Prim:
    """Create a USD camera prim with given projection type.

    The function creates various attributes on the camera prim that specify the camera's properties.
    These are later used by ``omni.replicator.core`` to render the scene with the given camera.

    .. note::
        This function is decorated with :func:`clone` that resolves the prim path into a list of paths
        if the input prim path is a regex pattern. This is done to support spawning multiple assets
        from a single configuration and cloning the USD prim at the given path expression.

    Args:
        prim_path: The prim path or pattern to spawn the asset at. If the prim path is a regex pattern,
            then the asset is spawned at all the matching prim paths.
        cfg: The configuration instance.
        translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None,
            in which case this is set to the origin.
        orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim.
            Defaults to None, in which case this is set to identity.

    Returns:
        The created prim.

    Raises:
        ValueError: If a prim already exists at the given path.
    """
    # spawn camera if it doesn't exist.
    if not prim_utils.is_prim_path_valid(prim_path):
        prim_utils.create_prim(prim_path, "Camera", translation=translation, orientation=orientation)
    else:
        raise ValueError(f"A prim already exists at path: '{prim_path}'.")

    # lock camera from viewport (this disables viewport movement for camera)
    if cfg.lock_camera:
        omni.kit.commands.execute(
            "ChangePropertyCommand",
            prop_path=Sdf.Path(f"{prim_path}.omni:kit:cameraLock"),
            value=True,
            prev=None,
            type_to_create_if_not_exist=Sdf.ValueTypeNames.Bool,
        )
    # decide the custom attributes to add
    if cfg.projection_type == "pinhole":
        attribute_types = CUSTOM_PINHOLE_CAMERA_ATTRIBUTES
    else:
        attribute_types = CUSTOM_FISHEYE_CAMERA_ATTRIBUTES

    # custom attributes in the config that are not USD Camera parameters
    non_usd_cfg_param_names = ["func", "copy_from_source", "lock_camera", "visible", "semantic_tags"]

    # get camera prim
    prim = prim_utils.get_prim_at_path(prim_path)
    # create attributes for the fisheye camera model
    # note: for pinhole those are already part of the USD camera prim
    for attr_name, attr_type in attribute_types.values():
        # check if attribute does not exist
        if prim.GetAttribute(attr_name).Get() is None:
            # create attribute based on type
            prim.CreateAttribute(attr_name, attr_type)
    # set attribute values
    for param_name, param_value in cfg.__dict__.items():
        # check if value is valid
        if param_value is None or param_name in non_usd_cfg_param_names:
            continue
        # obtain prim property name
        if param_name in attribute_types:
            # custom attribute: look up the USD attribute name from the mapping
            prim_prop_name = attribute_types[param_name][0]
        else:
            # standard attribute: convert the config name to the camelCase USD attribute name
            prim_prop_name = to_camel_case(param_name, to="cC")
        # set the attribute value on the prim
        prim.GetAttribute(prim_prop_name).Set(param_value)
    # return the prim
    return prim_utils.get_prim_at_path(prim_path)
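

# The sketch below is illustrative only: it shows one plausible way to call :func:`spawn_camera` with a
# pinhole configuration. The prim path, pose and parameter values are placeholders; the config class
# comes from the sibling ``sensors_cfg`` module.
def _example_spawn_pinhole_camera():
    """Illustrative sketch (not called anywhere): spawning a pinhole camera prim."""
    from . import sensors_cfg

    # configure a standard pinhole camera with a slightly longer focal length
    cfg = sensors_cfg.PinholeCameraCfg(focal_length=35.0, clipping_range=(0.05, 1.0e5))
    # spawn the camera half a metre above the origin, with identity orientation
    return spawn_camera("/World/Camera", cfg, translation=(0.0, 0.0, 0.5))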
5,404
Python
40.576923
115
0.687454
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/sensors/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for spawners that spawn sensors in the simulation. Currently, the following sensors are supported: * Camera: A USD camera prim with settings for pinhole or fisheye projections. """ from .sensors import spawn_camera from .sensors_cfg import FisheyeCameraCfg, PinholeCameraCfg
416
Python
25.062498
77
0.778846
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/sensors/sensors_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from collections.abc import Callable
from typing import Literal

from omni.isaac.orbit.sim.spawners.spawner_cfg import SpawnerCfg
from omni.isaac.orbit.utils import configclass

from . import sensors


@configclass
class PinholeCameraCfg(SpawnerCfg):
    """Configuration parameters for a USD camera prim with pinhole camera settings.

    For more information on the parameters, please refer to the
    `camera documentation <https://docs.omniverse.nvidia.com/materials-and-rendering/latest/cameras.html>`__.

    .. note::
        The default values are taken from the `Replicator camera <https://docs.omniverse.nvidia.com/py/replicator/1.9.8/source/extensions/omni.replicator.core/docs/API.html#omni.replicator.core.create.camera>`__
        function.
    """

    func: Callable = sensors.spawn_camera

    projection_type: str = "pinhole"
    """Type of projection to use for the camera. Defaults to "pinhole".

    Note:
        Currently only "pinhole" is supported.
    """

    clipping_range: tuple[float, float] = (0.01, 1e6)
    """Near and far clipping distances (in m). Defaults to (0.01, 1e6).

    The minimum clipping range will shift the camera forward by the specified distance. Avoid setting it
    too high, as this can cause issues for distance-related data types (e.g., ``distance_to_image_plane``).
    """

    focal_length: float = 24.0
    """Perspective focal length (in cm). Defaults to 24.0cm.

    Longer focal lengths give a narrower field of view; shorter focal lengths give a wider field of view.
    """

    focus_distance: float = 400.0
    """Distance from the camera to the focus plane (in m). Defaults to 400.0.

    The distance at which perfect sharpness is achieved.
    """

    f_stop: float = 0.0
    """Lens aperture. Defaults to 0.0, which turns off focusing.

    Controls distance blurring. Lower numbers decrease the focus range, larger numbers increase it.
    """

    horizontal_aperture: float = 20.955
    """Horizontal aperture (in mm). Defaults to 20.955mm.

    Emulates sensor/film width on a camera.

    Note:
        The default value is the horizontal aperture of a 35 mm spherical projector.
    """

    horizontal_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate horizontally. Defaults to 0.0."""

    vertical_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate vertically. Defaults to 0.0."""

    lock_camera: bool = True
    """Locks the camera in the Omniverse viewport. Defaults to True.

    If True, then the camera remains fixed at its configured transform. This is useful when wanting to view
    the camera output on the GUI and not accidentally moving the camera through the GUI interactions.
    """


@configclass
class FisheyeCameraCfg(PinholeCameraCfg):
    """Configuration parameters for a USD camera prim with `fish-eye camera`_ settings.

    For more information on the parameters, please refer to the
    `camera documentation <https://docs.omniverse.nvidia.com/materials-and-rendering/latest/cameras.html#fisheye-properties>`__.

    .. note::
        The default values are taken from the `Replicator camera <https://docs.omniverse.nvidia.com/py/replicator/1.9.8/source/extensions/omni.replicator.core/docs/API.html#omni.replicator.core.create.camera>`__
        function.

    .. _fish-eye camera: https://en.wikipedia.org/wiki/Fisheye_lens
    """

    func: Callable = sensors.spawn_camera

    projection_type: Literal[
        "fisheye_orthographic", "fisheye_equidistant", "fisheye_equisolid", "fisheye_polynomial", "fisheye_spherical"
    ] = "fisheye_polynomial"
    r"""Type of projection to use for the camera. Defaults to "fisheye_polynomial".
Available options: - ``"fisheye_orthographic"``: Fisheye camera model using orthographic correction. - ``"fisheye_equidistant"``: Fisheye camera model using equidistant correction. - ``"fisheye_equisolid"``: Fisheye camera model using equisolid correction. - ``"fisheye_polynomial"``: Fisheye camera model with :math:`360^{\circ}` spherical projection. - ``"fisheye_spherical"``: Fisheye camera model with :math:`360^{\circ}` full-frame projection. """ fisheye_nominal_width: float = 1936.0 """Nominal width of fisheye lens model (in pixels). Defaults to 1936.0.""" fisheye_nominal_height: float = 1216.0 """Nominal height of fisheye lens model (in pixels). Defaults to 1216.0.""" fisheye_optical_centre_x: float = 970.94244 """Horizontal optical centre position of fisheye lens model (in pixels). Defaults to 970.94244.""" fisheye_optical_centre_y: float = 600.37482 """Vertical optical centre position of fisheye lens model (in pixels). Defaults to 600.37482.""" fisheye_max_fov: float = 200.0 """Maximum field of view of fisheye lens model (in degrees). Defaults to 200.0 degrees.""" fisheye_polynomial_a: float = 0.0 """First component of fisheye polynomial. Defaults to 0.0.""" fisheye_polynomial_b: float = 0.00245 """Second component of fisheye polynomial. Defaults to 0.00245.""" fisheye_polynomial_c: float = 0.0 """Third component of fisheye polynomial. Defaults to 0.0.""" fisheye_polynomial_d: float = 0.0 """Fourth component of fisheye polynomial. Defaults to 0.0.""" fisheye_polynomial_e: float = 0.0 """Fifth component of fisheye polynomial. Defaults to 0.0.""" fisheye_polynomial_f: float = 0.0 """Sixth component of fisheye polynomial. Defaults to 0.0."""
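

# The sketch below is illustrative only: it shows one plausible way to use the configuration classes above
# with the ``func`` spawning pattern described in the package docstring. The prim path and the parameter
# values are placeholders.
def _example_fisheye_camera_cfg():
    """Illustrative sketch (not called anywhere): spawning a fisheye camera through its config."""
    # a polynomial fisheye camera with a reduced maximum field of view
    cfg = FisheyeCameraCfg(projection_type="fisheye_polynomial", fisheye_max_fov=180.0)
    # the configured function performs the actual spawning
    return cfg.func("/World/FisheyeCamera", cfg, translation=(0.0, 0.0, 1.0))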
5,559
Python
42.4375
211
0.706602
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/lights/lights_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from collections.abc import Callable
from dataclasses import MISSING
from typing import Literal

from omni.isaac.orbit.sim.spawners.spawner_cfg import SpawnerCfg
from omni.isaac.orbit.utils import configclass

from . import lights


@configclass
class LightCfg(SpawnerCfg):
    """Configuration parameters for creating a light in the scene.

    Please refer to the documentation on `USD LuxLight <https://openusd.org/dev/api/class_usd_lux_light_a_p_i.html>`_
    for more information.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    func: Callable = lights.spawn_light

    prim_type: str = MISSING
    """The prim type name for the light prim."""

    color: tuple[float, float, float] = (1.0, 1.0, 1.0)
    """The color of emitted light, in energy-linear terms. Defaults to white."""

    enable_color_temperature: bool = False
    """Enables color temperature. Defaults to false."""

    color_temperature: float = 6500.0
    """Color temperature (in Kelvin) representing the white point. The valid range is [1000, 10000]. Defaults to 6500K.

    The `color temperature <https://en.wikipedia.org/wiki/Color_temperature>`_ corresponds
    to the warmth or coolness of light. Warmer light has a lower color temperature, while cooler
    light has a higher color temperature.

    Note:
        It only takes effect when :attr:`enable_color_temperature` is true.
    """

    normalize: bool = False
    """Normalizes power by the surface area of the light. Defaults to false.

    This makes it easier to independently adjust the power and shape of the light, by causing the power
    to not vary with the area or angular size of the light.
    """

    exposure: float = 0.0
    """Scales the power of the light exponentially as a power of 2. Defaults to 0.0.

    The result is multiplied against the intensity.
    """

    intensity: float = 1.0
    """Scales the power of the light linearly. Defaults to 1.0."""


@configclass
class DiskLightCfg(LightCfg):
    """Configuration parameters for creating a disk light in the scene.

    A disk light is a light source that emits light from a disk. It is useful for simulating
    fluorescent lights. For more information, please refer to the documentation on
    `USDLux DiskLight <https://openusd.org/dev/api/class_usd_lux_disk_light.html>`_.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    prim_type = "DiskLight"

    radius: float = 0.5
    """Radius of the disk (in m). Defaults to 0.5m."""


@configclass
class DistantLightCfg(LightCfg):
    """Configuration parameters for creating a distant light in the scene.

    A distant light is a light source that is infinitely far away, and emits parallel rays of light.
    It is useful for simulating sun/moon light. For more information, please refer to the documentation on
    `USDLux DistantLight <https://openusd.org/dev/api/class_usd_lux_distant_light.html>`_.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    prim_type = "DistantLight"

    angle: float = 0.53
    """Angular size of the light (in degrees). Defaults to 0.53 degrees.

    As an example, the Sun is approximately 0.53 degrees as seen from Earth. Higher values broaden the
    light and therefore soften shadow edges.
    """


@configclass
class DomeLightCfg(LightCfg):
    """Configuration parameters for creating a dome light in the scene.

    A dome light is a light source that emits light inwards from all directions.
    It is also possible to attach a texture to the dome light, which will be used to emit light.
    For more information, please refer to the documentation on
    `USDLux DomeLight <https://openusd.org/dev/api/class_usd_lux_dome_light.html>`_.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    prim_type = "DomeLight"

    texture_file: str | None = None
    """A color texture to use on the dome, such as an HDR (high dynamic range) texture intended
    for IBL (image based lighting). Defaults to None.

    If None, the dome will emit a uniform color.
    """

    texture_format: Literal["automatic", "latlong", "mirroredBall", "angular", "cubeMapVerticalCross"] = "automatic"
    """The parametrization format of the color map file. Defaults to "automatic".

    Valid values are:

    * ``"automatic"``: Tries to determine the layout from the file itself. For example, Renderman texture files embed an explicit parameterization.
    * ``"latlong"``: Latitude as X, longitude as Y.
    * ``"mirroredBall"``: An image of the environment reflected in a sphere, using an implicitly orthogonal projection.
    * ``"angular"``: Similar to mirroredBall but the radial dimension is mapped linearly to the angle, providing better sampling at the edges.
    * ``"cubeMapVerticalCross"``: A cube map with faces laid out as a vertical cross.
    """


@configclass
class CylinderLightCfg(LightCfg):
    """Configuration parameters for creating a cylinder light in the scene.

    A cylinder light is a light source that emits light from a cylinder. It is useful for simulating
    fluorescent lights. For more information, please refer to the documentation on
    `USDLux CylinderLight <https://openusd.org/dev/api/class_usd_lux_cylinder_light.html>`_.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    prim_type = "CylinderLight"

    length: float = 1.0
    """Length of the cylinder (in m). Defaults to 1.0m."""

    radius: float = 0.5
    """Radius of the cylinder (in m). Defaults to 0.5m."""

    treat_as_line: bool = False
    """Treats the cylinder as a line source, i.e. a zero-radius cylinder. Defaults to false."""


@configclass
class SphereLightCfg(LightCfg):
    """Configuration parameters for creating a sphere light in the scene.

    A sphere light is a light source that emits light outward from a sphere. For more information,
    please refer to the documentation on
    `USDLux SphereLight <https://openusd.org/dev/api/class_usd_lux_sphere_light.html>`_.

    .. note::
        The default values for the attributes are those specified in their official documentation.
    """

    prim_type = "SphereLight"

    radius: float = 0.5
    """Radius of the sphere. Defaults to 0.5m."""

    treat_as_point: bool = False
    """Treats the sphere as a point source, i.e. a zero-radius sphere. Defaults to false."""
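

# The sketch below is illustrative only: it shows how the light configurations above are typically
# instantiated and spawned through their ``func`` reference. The prim paths, intensities and the HDR
# texture path are placeholders.
def _example_light_cfgs():
    """Illustrative sketch (not called anywhere): spawning a distant light and a textured dome light."""
    # a sun-like distant light with a slightly warm color temperature
    sun_cfg = DistantLightCfg(intensity=3000.0, enable_color_temperature=True, color_temperature=5500.0)
    sun_cfg.func("/World/Sun", sun_cfg)
    # a dome light that emits light from an HDR environment texture
    dome_cfg = DomeLightCfg(intensity=1000.0, texture_file="/path/to/environment.hdr")  # placeholder path
    dome_cfg.func("/World/DomeLight", dome_cfg)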
6,801
Python
35.767567
147
0.708278
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/lights/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for spawners that spawn lights in the simulation. There are various different kinds of lights that can be spawned into the USD stage. Please check the Omniverse documentation for `lighting overview <https://docs.omniverse.nvidia.com/materials-and-rendering/latest/103/lighting.html>`_. """ from .lights import spawn_light from .lights_cfg import CylinderLightCfg, DiskLightCfg, DistantLightCfg, DomeLightCfg, LightCfg, SphereLightCfg
573
Python
37.266664
111
0.794066
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sim/spawners/lights/lights.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from typing import TYPE_CHECKING

import omni.isaac.core.utils.prims as prim_utils
from pxr import Usd, UsdLux

from omni.isaac.orbit.sim.utils import clone, safe_set_attribute_on_usd_prim

if TYPE_CHECKING:
    from . import lights_cfg


@clone
def spawn_light(
    prim_path: str,
    cfg: lights_cfg.LightCfg,
    translation: tuple[float, float, float] | None = None,
    orientation: tuple[float, float, float, float] | None = None,
) -> Usd.Prim:
    """Create a light prim at the specified prim path with the specified configuration.

    The created prim is based on the `USD LuxLight <https://openusd.org/dev/api/class_usd_lux_light_a_p_i.html>`_ API.

    .. note::
        This function is decorated with :func:`clone` that resolves the prim path into a list of paths
        if the input prim path is a regex pattern. This is done to support spawning multiple assets
        from a single configuration and cloning the USD prim at the given path expression.

    Args:
        prim_path: The prim path or pattern to spawn the asset at. If the prim path is a regex pattern,
            then the asset is spawned at all the matching prim paths.
        cfg: The configuration for the light source.
        translation: The translation of the prim. Defaults to None, in which case this is set to the origin.
        orientation: The orientation of the prim as (w, x, y, z). Defaults to None, in which case this
            is set to identity.

    Returns:
        The created prim.

    Raises:
        ValueError: When a prim already exists at the specified prim path.
    """
    # check if prim already exists
    if prim_utils.is_prim_path_valid(prim_path):
        raise ValueError(f"A prim already exists at path: '{prim_path}'.")
    # create the prim
    prim = prim_utils.create_prim(prim_path, prim_type=cfg.prim_type, translation=translation, orientation=orientation)

    # convert to dict
    cfg = cfg.to_dict()
    # delete spawner func specific parameters
    del cfg["prim_type"]
    # delete custom attributes in the config that are not USD parameters
    non_usd_cfg_param_names = ["func", "copy_from_source", "visible", "semantic_tags"]
    for param_name in non_usd_cfg_param_names:
        del cfg[param_name]
    # set into USD API
    for attr_name, value in cfg.items():
        # special operation for texture properties
        # note: this is only used for dome light
        if "texture" in attr_name:
            light_prim = UsdLux.DomeLight(prim)
            if attr_name == "texture_file":
                light_prim.CreateTextureFileAttr(value)
            elif attr_name == "texture_format":
                light_prim.CreateTextureFormatAttr(value)
            else:
                raise ValueError(f"Unsupported texture attribute: '{attr_name}'.")
        else:
            prim_prop_name = f"inputs:{attr_name}"
            # set the attribute
            safe_set_attribute_on_usd_prim(prim, prim_prop_name, value, camel_case=True)
    # return the prim
    return prim
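

# The sketch below is illustrative only: it shows one plausible direct call to :func:`spawn_light` using a
# sphere light configuration. The prim path, radius and intensity are placeholder values; the config class
# comes from the sibling ``lights_cfg`` module.
def _example_spawn_sphere_light():
    """Illustrative sketch (not called anywhere): spawning a sphere light above the origin."""
    from . import lights_cfg

    # a small, bright sphere light two metres above the origin
    cfg = lights_cfg.SphereLightCfg(radius=0.25, intensity=5000.0)
    return spawn_light("/World/SphereLight", cfg, translation=(0.0, 0.0, 2.0))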
3,120
Python
39.01282
119
0.666346