fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_mesh_subterrains.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
import argparse
parser = argparse.ArgumentParser(description="Generate terrains using trimesh")
parser.add_argument(
"--headless", action="store_true", default=False, help="Don't create a window to display each output."
)
args_cli = parser.parse_args()
from omni.isaac.orbit.app import AppLauncher
# launch omniverse app
# note: we only need to do this because of `TerrainImporter` which uses Omniverse functions
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import os
import trimesh
import omni.isaac.orbit.terrains.trimesh as mesh_gen
from omni.isaac.orbit.terrains.utils import color_meshes_by_height
def test_flat_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshPlaneTerrainCfg(size=(8.0, 8.0))
# generate the terrain
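    # note: as used throughout this script, each sub-terrain's `cfg.function` returns the
    # generated trimesh meshes together with the sub-terrain origin (x, y, z)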
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "flat_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Flat Terrain")
def test_pyramid_stairs_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshPyramidStairsTerrainCfg(
size=(8.0, 8.0),
border_width=0.2,
step_width=0.3,
step_height_range=(0.05, 0.23),
platform_width=1.5,
holes=holes,
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if holes:
caption = "Pyramid Stairs Terrain with Holes"
filename = "pyramid_stairs_terrain_with_holes.jpg"
else:
caption = "Pyramid Stairs Terrain"
filename = "pyramid_stairs_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_inverted_pyramid_stairs_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshInvertedPyramidStairsTerrainCfg(
size=(8.0, 8.0),
border_width=0.2,
step_width=0.3,
step_height_range=(0.05, 0.23),
platform_width=1.5,
holes=holes,
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if holes:
caption = "Inverted Pyramid Stairs Terrain with Holes"
filename = "inverted_pyramid_stairs_terrain_with_holes.jpg"
else:
caption = "Inverted Pyramid Stairs Terrain"
filename = "inverted_pyramid_stairs_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_random_grid_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshRandomGridTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
grid_width=0.75,
grid_height_range=(0.025, 0.2),
holes=holes,
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if holes:
caption = "Random Grid Terrain with Holes"
filename = "random_grid_terrain_with_holes.jpg"
else:
caption = "Random Grid Terrain"
filename = "random_grid_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_rails_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshRailsTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
rail_thickness_range=(0.05, 0.1),
rail_height_range=(0.05, 0.3),
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "rails_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Rail Terrain")
def test_pit_terrain(difficulty: float, double_pit: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshPitTerrainCfg(
size=(8.0, 8.0), platform_width=1.5, pit_depth_range=(0.05, 1.1), double_pit=double_pit
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if double_pit:
caption = "Pit Terrain with Two Levels"
filename = "pit_terrain_with_two_levels.jpg"
else:
caption = "Pit Terrain"
filename = "pit_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_box_terrain(difficulty: float, double_box: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshBoxTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
box_height_range=(0.05, 0.2),
double_box=double_box,
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if double_box:
caption = "Box Terrain with Two Levels"
filename = "box_terrain_with_two_boxes.jpg"
else:
caption = "Box Terrain"
filename = "box_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_gap_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshGapTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
gap_width_range=(0.05, 1.1),
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "gap_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Gap Terrain")
def test_floating_ring_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshFloatingRingTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
ring_height_range=(0.4, 1.0),
ring_width_range=(0.5, 1.0),
ring_thickness=0.05,
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "floating_ring_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Floating Ring Terrain")
def test_star_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = mesh_gen.MeshStarTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
num_bars=5,
bar_width_range=(0.5, 1.0),
bar_height_range=(0.05, 0.2),
)
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "star_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Star Terrain")
def test_repeated_objects_terrain(
difficulty: float, object_type: str, output_dir: str, headless: bool, provide_as_string: bool = False
):
# parameters for the terrain
if object_type == "pyramid":
cfg = mesh_gen.MeshRepeatedPyramidsTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
max_height_noise=0.5,
object_params_start=mesh_gen.MeshRepeatedPyramidsTerrainCfg.ObjectCfg(
num_objects=40, height=0.05, radius=0.6, max_yx_angle=0.0, degrees=True
),
object_params_end=mesh_gen.MeshRepeatedPyramidsTerrainCfg.ObjectCfg(
num_objects=80, height=0.15, radius=0.6, max_yx_angle=60.0, degrees=True
),
)
elif object_type == "box":
cfg = mesh_gen.MeshRepeatedBoxesTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
max_height_noise=0.5,
object_params_start=mesh_gen.MeshRepeatedBoxesTerrainCfg.ObjectCfg(
num_objects=40, height=0.05, size=(0.6, 0.6), max_yx_angle=0.0, degrees=True
),
object_params_end=mesh_gen.MeshRepeatedBoxesTerrainCfg.ObjectCfg(
num_objects=80, height=0.15, size=(0.6, 0.6), max_yx_angle=60.0, degrees=True
),
)
elif object_type == "cylinder":
cfg = mesh_gen.MeshRepeatedCylindersTerrainCfg(
size=(8.0, 8.0),
platform_width=1.5,
max_height_noise=0.5,
object_params_start=mesh_gen.MeshRepeatedCylindersTerrainCfg.ObjectCfg(
num_objects=40, height=0.05, radius=0.6, max_yx_angle=0.0, degrees=True
),
object_params_end=mesh_gen.MeshRepeatedCylindersTerrainCfg.ObjectCfg(
num_objects=80, height=0.15, radius=0.6, max_yx_angle=60.0, degrees=True
),
)
else:
raise ValueError(f"Invalid object type for repeated objects terrain: {object_type}")
# provide object_type as string (check that the import works)
if provide_as_string:
cfg.object_type = object_type
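        # note: assuming the generator resolves a string `object_type` by importing the matching
        # object function at generation time, mirroring the callable form of the config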
# generate the terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, f"repeated_objects_{object_type}_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=f"Repeated Objects Terrain: {object_type}")
def main():
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "terrains", "trimesh")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Read headless mode
headless = args_cli.headless
# generate terrains
test_flat_terrain(difficulty=0.0, output_dir=output_dir, headless=headless)
test_pyramid_stairs_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
test_pyramid_stairs_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
test_inverted_pyramid_stairs_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
test_inverted_pyramid_stairs_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
test_random_grid_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
    test_random_grid_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
    test_rails_terrain(difficulty=0.75, output_dir=output_dir, headless=headless)
    test_pit_terrain(difficulty=0.75, double_pit=False, output_dir=output_dir, headless=headless)
    test_pit_terrain(difficulty=0.75, double_pit=True, output_dir=output_dir, headless=headless)
    test_box_terrain(difficulty=0.75, double_box=False, output_dir=output_dir, headless=headless)
    test_box_terrain(difficulty=0.75, double_box=True, output_dir=output_dir, headless=headless)
    test_gap_terrain(difficulty=0.75, output_dir=output_dir, headless=headless)
    test_floating_ring_terrain(difficulty=0.75, output_dir=output_dir, headless=headless)
    test_star_terrain(difficulty=0.75, output_dir=output_dir, headless=headless)
test_repeated_objects_terrain(difficulty=0.75, object_type="pyramid", output_dir=output_dir, headless=headless)
test_repeated_objects_terrain(difficulty=0.75, object_type="cylinder", output_dir=output_dir, headless=headless)
test_repeated_objects_terrain(difficulty=0.75, object_type="box", output_dir=output_dir, headless=headless)
test_repeated_objects_terrain(
difficulty=0.75, object_type="cylinder", provide_as_string=True, output_dir=output_dir, headless=headless
)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use the terrain generator from the Orbit framework.
The terrains are generated using the :class:`TerrainGenerator` class and imported using the :class:`TerrainImporter`
class. The terrains can be imported from a file or generated procedurally.
Example usage:
.. code-block:: bash
# generate terrain
# -- use physics sphere mesh
./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type generator
    # -- use usd sphere geom
./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type generator --geom_sphere
# usd terrain
./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type usd
# plane terrain
./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type plane
"""
"""Launch Isaac Sim Simulator first."""
import argparse
# omni-isaac-orbit
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script shows how to use the terrain importer.")
parser.add_argument("--geom_sphere", action="store_true", default=False, help="Whether to use sphere mesh or shape.")
parser.add_argument(
"--terrain_type",
type=str,
choices=["generator", "usd", "plane"],
default="generator",
help="Type of terrain to import. Can be 'generator' or 'usd' or 'plane'.",
)
parser.add_argument(
"--color_scheme",
type=str,
default="height",
choices=["height", "random", "none"],
help="The color scheme to use for the generated terrain.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from omni.isaac.cloner import GridCloner
from omni.isaac.core.materials import PhysicsMaterial, PreviewSurface
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.terrains as terrain_gen
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.orbit.terrains.terrain_importer import TerrainImporter
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
def main():
"""Generates a terrain from orbit."""
# Load kit helper
sim_params = {
"use_gpu": True,
"use_gpu_pipeline": True,
"use_flatcache": True,
"use_fabric": True,
"enable_scene_query_support": True,
}
sim = SimulationContext(
physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0"
)
# Set main camera
set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5])
# Parameters
num_balls = 2048
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
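    # note: GridCloner lays the cloned envs out on a regular grid, `spacing` meters apart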
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim("/World/envs/env_0")
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
num_envs=2048,
env_spacing=3.0,
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type=args_cli.terrain_type,
terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True, color_scheme=args_cli.color_scheme),
usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
)
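    # note: presumably only the field matching `terrain_type` is consumed: the generator config
    # for "generator", the `usd_path` for "usd", and neither for "plane"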
terrain_importer = TerrainImporter(terrain_importer_cfg)
# Define the scene
# -- Light
cfg = sim_utils.DistantLightCfg(intensity=1000.0)
cfg.func("/World/Light", cfg)
# -- Ball
if args_cli.geom_sphere:
# -- Ball physics
_ = DynamicSphere(
prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25
)
else:
# -- Ball geometry
        sphere_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1]
        prim_utils.move_prim(sphere_prim_path, "/World/envs/env_0/ball")
# -- Ball physics
RigidPrim(prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5))
GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
# -- Ball material
sphere_geom = GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0]))
physics_material = PhysicsMaterial(
prim_path="/World/Looks/ballPhysicsMaterial",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sphere_geom.set_collision_approximation("convexHull")
sphere_geom.apply_visual_material(visual_material)
sphere_geom.apply_physics_material(physics_material)
# Clone the scene
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls)
cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
physics_scene_path = sim.get_physics_context().prim_path
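    # filter inter-env collisions while keeping every env colliding with the shared ground plane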
cloner.filter_collisions(
physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
)
# Set ball positions over terrain origins
# Create a view over all the balls
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
# cache initial state of the balls
    ball_initial_positions = terrain_importer.env_origins.clone()
ball_initial_positions[:, 2] += 5.0
# set initial poses
# note: setting here writes to USD :)
ball_view.set_world_poses(positions=ball_initial_positions)
# Play simulator
sim.reset()
# Initialize the ball views for physics simulation
ball_view.initialize()
ball_initial_velocities = ball_view.get_velocities()
# Create a counter for resetting the scene
step_count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if sim.is_stopped():
break
# If simulation is paused, then skip.
if not sim.is_playing():
sim.step()
continue
# Reset the scene
if step_count % 500 == 0:
# reset the balls
ball_view.set_world_poses(positions=ball_initial_positions)
ball_view.set_velocities(ball_initial_velocities)
# reset the counter
step_count = 0
# Step simulation
sim.step()
# Update counter
step_count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_height_field_subterrains.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
import argparse
parser = argparse.ArgumentParser(description="Generate terrains using trimesh")
parser.add_argument(
"--headless", action="store_true", default=False, help="Don't create a window to display each output."
)
args_cli = parser.parse_args()
from omni.isaac.orbit.app import AppLauncher
# launch omniverse app
# note: we only need to do this because of `TerrainImporter` which uses Omniverse functions
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import os
import trimesh
import omni.isaac.orbit.terrains.height_field as hf_gen
from omni.isaac.orbit.terrains.utils import color_meshes_by_height
def test_random_uniform_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfRandomUniformTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
border_width=0.0,
noise_range=(-0.05, 0.05),
noise_step=0.005,
downsampled_scale=0.2,
)
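    # note: `horizontal_scale` is assumed to be the grid resolution in meters per height-field
    # cell and `vertical_scale` the height discretization in meters, following the usual
    # height-field convention; the values here give an 80x80 grid for the 8 m x 8 m terrain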
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "random_uniform_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Random Uniform Terrain")
def test_pyramid_sloped_terrain(difficulty: float, inverted: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfPyramidSlopedTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
border_width=0.0,
slope_range=(0.0, 0.4),
platform_width=1.5,
inverted=inverted,
)
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if inverted:
caption = "Inverted Pyramid Sloped Terrain"
filename = "inverted_pyramid_sloped_terrain.jpg"
else:
caption = "Pyramid Sloped Terrain"
filename = "pyramid_sloped_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_pyramid_stairs_terrain(difficulty: float, inverted: bool, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfPyramidStairsTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
border_width=0.0,
platform_width=1.5,
step_width=0.301,
step_height_range=(0.05, 0.23),
inverted=inverted,
)
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if inverted:
caption = "Inverted Pyramid Stairs Terrain"
filename = "inverted_pyramid_stairs_terrain.jpg"
else:
caption = "Pyramid Stairs Terrain"
filename = "pyramid_stairs_terrain.jpg"
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_discrete_obstacles_terrain(difficulty: float, obstacle_height_mode: str, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfDiscreteObstaclesTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
border_width=0.0,
num_obstacles=50,
obstacle_height_mode=obstacle_height_mode,
obstacle_width_range=(0.25, 0.75),
obstacle_height_range=(1.0, 2.0),
platform_width=1.5,
)
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# resolve file name
if obstacle_height_mode == "choice":
caption = "Discrete Obstacles Terrain (Sampled Height)"
filename = "discrete_obstacles_terrain_choice.jpg"
elif obstacle_height_mode == "fixed":
caption = "Discrete Obstacles Terrain (Fixed Height)"
filename = "discrete_obstacles_terrain_fixed.jpg"
else:
raise ValueError(f"Unknown obstacle height mode: {obstacle_height_mode}")
# write the image to a file
with open(os.path.join(output_dir, filename), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption=caption)
def test_wave_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfWaveTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
border_width=0.0,
num_waves=5,
amplitude_range=(0.5, 1.0),
)
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "wave_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Wave Terrain")
def test_stepping_stones_terrain(difficulty: float, output_dir: str, headless: bool):
# parameters for the terrain
cfg = hf_gen.HfSteppingStonesTerrainCfg(
size=(8.0, 8.0),
horizontal_scale=0.1,
vertical_scale=0.005,
platform_width=1.5,
border_width=0.0,
stone_width_range=(0.25, 1.575),
stone_height_max=0.2,
stone_distance_range=(0.05, 0.1),
holes_depth=-2.0,
)
# generate terrain
meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
# add colors to the meshes based on the height
colored_mesh = color_meshes_by_height(meshes)
# add a marker for the origin
origin_transform = trimesh.transformations.translation_matrix(origin)
origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
# visualize the meshes
scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image
data = scene.save_image(resolution=(640, 480))
# write the image to a file
with open(os.path.join(output_dir, "stepping_stones_terrain.jpg"), "wb") as f:
f.write(data)
# show the scene in a window
if not headless:
trimesh.viewer.SceneViewer(scene=scene, caption="Stepping Stones Terrain")
def main():
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "terrains", "height_field")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Read headless mode
headless = args_cli.headless
# generate terrains
test_random_uniform_terrain(difficulty=0.25, output_dir=output_dir, headless=headless)
test_pyramid_sloped_terrain(difficulty=0.25, inverted=False, output_dir=output_dir, headless=headless)
test_pyramid_sloped_terrain(difficulty=0.25, inverted=True, output_dir=output_dir, headless=headless)
test_pyramid_stairs_terrain(difficulty=0.25, inverted=False, output_dir=output_dir, headless=headless)
test_pyramid_stairs_terrain(difficulty=0.25, inverted=True, output_dir=output_dir, headless=headless)
test_discrete_obstacles_terrain(
difficulty=0.25, obstacle_height_mode="choice", output_dir=output_dir, headless=headless
)
test_discrete_obstacles_terrain(
difficulty=0.25, obstacle_height_mode="fixed", output_dir=output_dir, headless=headless
)
test_wave_terrain(difficulty=0.25, output_dir=output_dir, headless=headless)
test_stepping_stones_terrain(difficulty=1.0, output_dir=output_dir, headless=headless)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/terrains/test_terrain_importer.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import numpy as np
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from omni.isaac.cloner import GridCloner
from omni.isaac.core.materials import PhysicsMaterial, PreviewSurface
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView
import omni.isaac.orbit.terrains as terrain_gen
from omni.isaac.orbit.sim import SimulationContext, build_simulation_context
from omni.isaac.orbit.terrains import TerrainImporter, TerrainImporterCfg
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
class TestTerrainImporter(unittest.TestCase):
"""Test the terrain importer for different ground and procedural terrains."""
def test_grid_clone_env_origins(self):
"""Tests that env origins are consistent when computed using the TerrainImporter and IsaacSim GridCloner."""
# iterate over different number of environments and environment spacing
for device in ("cuda:0", "cpu"):
for env_spacing in [1.0, 4.325, 8.0]:
for num_envs in [1, 4, 125, 379, 1024]:
with self.subTest(num_envs=num_envs, env_spacing=env_spacing):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
# create terrain importer
terrain_importer_cfg = TerrainImporterCfg(
num_envs=num_envs,
env_spacing=env_spacing,
prim_path="/World/ground",
terrain_type="plane", # for flat ground, origins are in grid
terrain_generator=None,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# obtain env origins using terrain importer
terrain_importer_origins = terrain_importer.env_origins
# obtain env origins using grid cloner
grid_cloner_origins = self._obtain_grid_cloner_env_origins(
num_envs, env_spacing, device=sim.device
)
# check if the env origins are the same
torch.testing.assert_close(
terrain_importer_origins, grid_cloner_origins, rtol=1e-5, atol=1e-5
)
def test_terrain_generation(self) -> None:
"""Generates assorted terrains and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True),
num_envs=1,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# calculate expected size from config
cfg = terrain_importer.cfg.terrain_generator
self.assertIsNotNone(cfg)
expectedSizeX = cfg.size[0] * cfg.num_rows + 2 * cfg.border_width
expectedSizeY = cfg.size[1] * cfg.num_cols + 2 * cfg.border_width
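                # i.e. a num_rows x num_cols grid of sub-terrains of the configured size,
                # plus a border of `border_width` on each side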
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_plane(self) -> None:
"""Generates a plane and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
expectedSizeX = 2.0e6
expectedSizeY = 2.0e6
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
num_envs=1,
env_spacing=1.0,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_usd(self) -> None:
"""Imports terrain from a usd and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="usd",
usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
num_envs=1,
env_spacing=1.0,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# expect values from USD file
expectedSizeX = 96
expectedSizeY = 96
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_ball_drop(self) -> None:
"""Generates assorted terrains and spheres. Tests that spheres fall onto terrain and do not pass through it"""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
self._populate_scene(geom_sphere=False, sim=sim)
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
sim.reset()
# Initialize the ball views for physics simulation
ball_view.initialize()
# Play simulator
for _ in range(500):
sim.step(render=False)
                # Balls may retain a small non-zero velocity (< ~0.2) if they are still rolling on the terrain.
                # If the balls fall through the terrain, the velocity is much higher (~82.0).
max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2]))
self.assertLessEqual(max_velocity_z.item(), 0.5)
def test_ball_drop_geom_sphere(self) -> None:
"""Generates assorted terrains and geom sepheres. Tests that spheres fall onto terrain and do not pass through it"""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
                self._populate_scene(geom_sphere=True, sim=sim)
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
sim.reset()
# Initialize the ball views for physics simulation
ball_view.initialize()
# Play simulator
for _ in range(500):
sim.step(render=False)
                # Balls may retain a small non-zero velocity (< ~0.2) if they are still rolling on the terrain.
                # If the balls fall through the terrain, the velocity is much higher (~82.0).
max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2]))
self.assertLessEqual(max_velocity_z.item(), 0.5)
"""
Helper functions.
"""
@staticmethod
def _obtain_grid_cloner_env_origins(num_envs: int, env_spacing: float, device: str) -> torch.Tensor:
"""Obtain the env origins generated by IsaacSim GridCloner (grid_cloner.py)."""
# create grid cloner
cloner = GridCloner(spacing=env_spacing)
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
prim_utils.define_prim("/World/envs/env_0")
# clone envs using grid cloner
env_origins = cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True
)
# return as tensor
return torch.tensor(env_origins, dtype=torch.float32, device=device)
def _populate_scene(self, sim: SimulationContext, num_balls: int = 2048, geom_sphere: bool = False):
"""Create a scene with terrain and randomly spawned balls.
        The spawned balls are either USD geom spheres or USD meshes. We check both to make sure
        that USD-shape and USD-mesh collisions work as expected.
"""
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True),
num_envs=num_balls,
)
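        # note: with curriculum=True, sub-terrains are presumably arranged by increasing
        # difficulty across the terrain rows rather than sampled at random difficulties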
terrain_importer = TerrainImporter(terrain_importer_cfg)
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim(prim_path="/World/envs/env_0", prim_type="Xform")
# Define the scene
# -- Ball
if geom_sphere:
# -- Ball physics
_ = DynamicSphere(
prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25
)
else:
# -- Ball geometry
            sphere_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1]
            prim_utils.move_prim(sphere_prim_path, "/World/envs/env_0/ball")
# -- Ball physics
RigidPrim(prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5))
GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
# -- Ball material
sphere_geom = GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0]))
physics_material = PhysicsMaterial(
prim_path="/World/Looks/ballPhysicsMaterial",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sphere_geom.set_collision_approximation("convexHull")
sphere_geom.apply_visual_material(visual_material)
sphere_geom.apply_physics_material(physics_material)
# Clone the scene
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls)
cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
physics_scene_path = sim.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
)
# Set ball positions over terrain origins
# Create a view over all the balls
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
# cache initial state of the balls
        ball_initial_positions = terrain_importer.env_origins.clone()
ball_initial_positions[:, 2] += 5.0
# set initial poses
# note: setting here writes to USD :)
ball_view.set_world_poses(positions=ball_initial_positions)
if __name__ == "__main__":
run_tests()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_string.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import random
import unittest
import omni.isaac.orbit.utils.string as string_utils
class TestStringUtilities(unittest.TestCase):
"""Test fixture for checking string utilities."""
def test_case_conversion(self):
"""Test case conversion between camel case and snake case."""
# test camel case to snake case
self.assertEqual(string_utils.to_snake_case("CamelCase"), "camel_case")
self.assertEqual(string_utils.to_snake_case("camelCase"), "camel_case")
self.assertEqual(string_utils.to_snake_case("CamelCaseString"), "camel_case_string")
# test snake case to camel case
self.assertEqual(string_utils.to_camel_case("snake_case", to="CC"), "SnakeCase")
self.assertEqual(string_utils.to_camel_case("snake_case_string", to="CC"), "SnakeCaseString")
self.assertEqual(string_utils.to_camel_case("snake_case_string", to="cC"), "snakeCaseString")
def test_resolve_matching_names_with_basic_strings(self):
"""Test resolving matching names with a basic expression."""
# list of strings
target_names = ["a", "b", "c", "d", "e"]
# test matching names
query_names = ["a|c", "b"]
index_list, names_list = string_utils.resolve_matching_names(query_names, target_names)
self.assertEqual(index_list, [0, 1, 2])
self.assertEqual(names_list, ["a", "b", "c"])
# test matching names with regex
query_names = ["a.*", "b"]
index_list, names_list = string_utils.resolve_matching_names(query_names, target_names)
self.assertEqual(index_list, [0, 1])
self.assertEqual(names_list, ["a", "b"])
# test duplicate names
query_names = ["a|c", "b", "a|c"]
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names(query_names, target_names)
# test no regex match
query_names = ["a|c", "b", "f"]
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names(query_names, target_names)
def test_resolve_matching_names_with_joint_name_strings(self):
"""Test resolving matching names with joint names."""
# list of strings
robot_joint_names = []
for i in ["hip", "thigh", "calf"]:
for j in ["FL", "FR", "RL", "RR"]:
robot_joint_names.append(f"{j}_{i}_joint")
# test matching names
index_list, names_list = string_utils.resolve_matching_names(".*", robot_joint_names)
self.assertEqual(index_list, list(range(len(robot_joint_names))))
self.assertEqual(names_list, robot_joint_names)
# test matching names with regex
index_list, names_list = string_utils.resolve_matching_names(".*_joint", robot_joint_names)
self.assertEqual(index_list, list(range(len(robot_joint_names))))
self.assertEqual(names_list, robot_joint_names)
# test matching names with regex
index_list, names_list = string_utils.resolve_matching_names(["FL.*", "FR.*"], robot_joint_names)
ground_truth_index_list = [0, 1, 4, 5, 8, 9]
self.assertEqual(index_list, ground_truth_index_list)
self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
# test matching names with regex
query_list = [
"FL_hip_joint",
"FL_thigh_joint",
"FR_hip_joint",
"FR_thigh_joint",
"FL_calf_joint",
"FR_calf_joint",
]
index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names)
ground_truth_index_list = [0, 1, 4, 5, 8, 9]
self.assertNotEqual(names_list, query_list)
self.assertEqual(index_list, ground_truth_index_list)
self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
# test matching names with regex but shuffled
# randomize order of previous query list
random.shuffle(query_list)
index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names)
ground_truth_index_list = [0, 1, 4, 5, 8, 9]
self.assertNotEqual(names_list, query_list)
self.assertEqual(index_list, ground_truth_index_list)
self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
def test_resolve_matching_names_with_preserved_order(self):
# list of strings and query list
robot_joint_names = []
for i in ["hip", "thigh", "calf"]:
for j in ["FL", "FR", "RL", "RR"]:
robot_joint_names.append(f"{j}_{i}_joint")
query_list = [
"FL_hip_joint",
"FL_thigh_joint",
"FR_hip_joint",
"FR_thigh_joint",
"FL_calf_joint",
"FR_calf_joint",
]
        # test that the query ordering is preserved when passing an explicit sublist
query_list.reverse()
index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names, preserve_order=True)
ground_truth_index_list = [9, 8, 5, 1, 4, 0]
self.assertEqual(names_list, query_list)
self.assertEqual(index_list, ground_truth_index_list)
        # test that the query ordering is preserved with regex expressions
index_list, names_list = string_utils.resolve_matching_names(
["FR.*", "FL.*"], robot_joint_names, preserve_order=True
)
ground_truth_index_list = [1, 5, 9, 0, 4, 8]
self.assertEqual(index_list, ground_truth_index_list)
self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
        # test that the query ordering is preserved with a mix of regex and non-regex expressions
index_list, names_list = string_utils.resolve_matching_names(
["FR.*", "FL_calf_joint", "FL_thigh_joint", "FL_hip_joint"], robot_joint_names, preserve_order=True
)
ground_truth_index_list = [1, 5, 9, 8, 4, 0]
self.assertEqual(index_list, ground_truth_index_list)
self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
def test_resolve_matching_names_values_with_basic_strings(self):
"""Test resolving matching names with a basic expression."""
# list of strings
target_names = ["a", "b", "c", "d", "e"]
# test matching names
data = {"a|c": 1, "b": 2}
index_list, names_list, values_list = string_utils.resolve_matching_names_values(data, target_names)
self.assertEqual(index_list, [0, 1, 2])
self.assertEqual(names_list, ["a", "b", "c"])
self.assertEqual(values_list, [1, 2, 1])
# test matching names with regex
data = {"a|d|e": 1, "b|c": 2}
index_list, names_list, values_list = string_utils.resolve_matching_names_values(data, target_names)
self.assertEqual(index_list, [0, 1, 2, 3, 4])
self.assertEqual(names_list, ["a", "b", "c", "d", "e"])
self.assertEqual(values_list, [1, 2, 2, 1, 1])
# test matching names with regex
data = {"a|d|e|b": 1, "b|c": 2}
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names_values(data, target_names)
# test no regex match
query_names = {"a|c": 1, "b": 0, "f": 2}
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names_values(query_names, target_names)
def test_resolve_matching_names_values_with_basic_strings_and_preserved_order(self):
"""Test resolving matching names with a basic expression."""
# list of strings
target_names = ["a", "b", "c", "d", "e"]
# test matching names
data = {"a|c": 1, "b": 2}
index_list, names_list, values_list = string_utils.resolve_matching_names_values(
data, target_names, preserve_order=True
)
self.assertEqual(index_list, [0, 2, 1])
self.assertEqual(names_list, ["a", "c", "b"])
self.assertEqual(values_list, [1, 1, 2])
# test matching names with regex
data = {"a|d|e": 1, "b|c": 2}
index_list, names_list, values_list = string_utils.resolve_matching_names_values(
data, target_names, preserve_order=True
)
self.assertEqual(index_list, [0, 3, 4, 1, 2])
self.assertEqual(names_list, ["a", "d", "e", "b", "c"])
self.assertEqual(values_list, [1, 1, 1, 2, 2])
# test matching names with regex
data = {"a|d|e|b": 1, "b|c": 2}
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names_values(data, target_names, preserve_order=True)
# test no regex match
query_names = {"a|c": 1, "b": 0, "f": 2}
with self.assertRaises(ValueError):
_ = string_utils.resolve_matching_names_values(query_names, target_names, preserve_order=True)
if __name__ == "__main__":
run_tests()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_dict.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
import omni.isaac.orbit.utils.dict as dict_utils
def test_function(x):
"""Test function for string <-> callable conversion."""
return x**2
def test_lambda_function(x):
"""Test function for string <-> callable conversion."""
return x**2
class TestDictUtilities(unittest.TestCase):
"""Test fixture for checking dictionary utilities in Orbit."""
def test_print_dict(self):
"""Test printing of dictionary."""
# create a complex nested dictionary
test_dict = {
"a": 1,
"b": 2,
"c": {"d": 3, "e": 4, "f": {"g": 5, "h": 6}},
"i": 7,
"j": lambda x: x**2, # noqa: E731
"k": dict_utils.class_to_dict,
}
# print the dictionary
dict_utils.print_dict(test_dict)
def test_string_callable_function_conversion(self):
"""Test string <-> callable conversion for function."""
# convert function to string
test_string = dict_utils.callable_to_string(test_function)
# convert string to function
test_function_2 = dict_utils.string_to_callable(test_string)
# check that functions are the same
self.assertEqual(test_function(2), test_function_2(2))
def test_string_callable_function_with_lambda_in_name_conversion(self):
"""Test string <-> callable conversion for function which has lambda in its name."""
# convert function to string
test_string = dict_utils.callable_to_string(test_lambda_function)
# convert string to function
test_function_2 = dict_utils.string_to_callable(test_string)
# check that functions are the same
        self.assertEqual(test_lambda_function(2), test_function_2(2))
def test_string_callable_lambda_conversion(self):
"""Test string <-> callable conversion for lambda expression."""
# create lambda function
func = lambda x: x**2 # noqa: E731
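        # note: a lambda has no importable qualified name, so the round trip is assumed to fall
        # back to serializing the expression source rather than a "module:function" path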
# convert function to string
test_string = dict_utils.callable_to_string(func)
# convert string to function
func_2 = dict_utils.string_to_callable(test_string)
# check that functions are the same
self.assertEqual(func(2), func_2(2))
if __name__ == "__main__":
run_tests()

fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_math.py

# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import unittest
from math import pi as PI
"""Launch Isaac Sim Simulator first.
This is only needed because of warp dependency.
"""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app in headless mode
simulation_app = AppLauncher(headless=True).app
import omni.isaac.orbit.utils.math as math_utils
class TestMathUtilities(unittest.TestCase):
"""Test fixture for checking math utilities in Orbit."""
def test_is_identity_pose(self):
"""Test is_identity_pose method."""
identity_pos_one_row = torch.zeros(3)
identity_rot_one_row = torch.tensor((1.0, 0.0, 0.0, 0.0))
self.assertTrue(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))
identity_pos_one_row[0] = 1.0
identity_rot_one_row[1] = 1.0
self.assertFalse(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))
identity_pos_multi_row = torch.zeros(3, 3)
identity_rot_multi_row = torch.zeros(3, 4)
identity_rot_multi_row[:, 0] = 1.0
self.assertTrue(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))
identity_pos_multi_row[0, 0] = 1.0
identity_rot_multi_row[0, 1] = 1.0
self.assertFalse(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))
def test_axis_angle_from_quat(self):
"""Test axis_angle_from_quat method."""
# Quaternions of the form (2,4) and (2,2,4)
quats = [
torch.Tensor([[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]]),
torch.Tensor([
[[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]],
[[1.0, 0.0, 0.0, 0.0], [0.9850375, 0.0995007, 0.0995007, 0.0995007]],
]),
]
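        # sanity check on the constants: the second quaternion encodes the axis-angle vector
        # (0.3, 0.0, 1.1); theta = ||(0.3, 0.0, 1.1)|| ~= 1.1402 rad gives w = cos(theta/2) ~= 0.8419
        # and xyz = axis_normalized * sin(theta/2) ~= (0.1420, 0.0, 0.5207)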
# Angles of the form (2,3) and (2,2,3)
angles = [
torch.Tensor([[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]]),
torch.Tensor([[[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]], [[0.0, 0.0, 0.0], [0.2, 0.2, 0.2]]]),
]
for quat, angle in zip(quats, angles):
with self.subTest(quat=quat, angle=angle):
torch.testing.assert_close(math_utils.axis_angle_from_quat(quat), angle)
def test_axis_angle_from_quat_approximation(self):
"""Test the Taylor approximation from axis_angle_from_quat method.
This test checks for unstable conversions where theta is very small.
"""
# Generate a small rotation quaternion
# Small angle
theta = torch.Tensor([0.0000001])
        # Arbitrary normalized axis of rotation, (x, y, z)
axis = [-0.302286, 0.205494, -0.930803]
# Generate quaternion
qw = torch.cos(theta / 2)
quat_vect = [qw] + [d * torch.sin(theta / 2) for d in axis]
quaternion = torch.tensor(quat_vect, dtype=torch.float32)
# Convert quaternion to axis-angle
axis_angle_computed = math_utils.axis_angle_from_quat(quaternion)
# Expected axis-angle representation
axis_angle_expected = torch.tensor([theta * d for d in axis], dtype=torch.float32)
# Assert that the computed values are close to the expected values
torch.testing.assert_close(axis_angle_computed, axis_angle_expected)
def test_quat_error_magnitude(self):
"""Test quat_error_magnitude method."""
# Define test cases
# Each tuple contains: q1, q2, expected error
test_cases = [
# No rotation
(torch.Tensor([1, 0, 0, 0]), torch.Tensor([1, 0, 0, 0]), torch.Tensor([0.0])),
# PI/2 rotation
(torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.7071068, 0.7071068, 0, 0]), torch.Tensor([PI / 2])),
# PI rotation
(torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.0, 0.0, 1.0, 0]), torch.Tensor([PI])),
]
# Test higher dimension (batched) inputs
q1_list = torch.stack([t[0] for t in test_cases], dim=0)
q2_list = torch.stack([t[1] for t in test_cases], dim=0)
expected_diff_list = torch.stack([t[2] for t in test_cases], dim=0).flatten()
test_cases += [(q1_list, q2_list, expected_diff_list)]
# Iterate over test cases
for q1, q2, expected_diff in test_cases:
with self.subTest(q1=q1, q2=q2):
# Compute the error
q12_diff = math_utils.quat_error_magnitude(q1, q2)
# Check that the error is close to the expected value
if len(q1.shape) > 1:
torch.testing.assert_close(q12_diff, expected_diff)
else:
self.assertAlmostEqual(q12_diff.item(), expected_diff.item(), places=5)
def test_quat_unique(self):
"""Test quat_unique method."""
# Define test cases
quats = math_utils.random_orientation(num=1024, device="cpu")
# Test positive real quaternion
pos_real_quats = math_utils.quat_unique(quats)
# Test that the real part is positive
self.assertTrue(torch.all(pos_real_quats[:, 0] > 0).item())
non_pos_indices = quats[:, 0] < 0
# Check imaginary part have sign flipped if real part is negative
torch.testing.assert_close(pos_real_quats[non_pos_indices], -quats[non_pos_indices])
torch.testing.assert_close(pos_real_quats[~non_pos_indices], quats[~non_pos_indices])
def test_quat_mul_with_quat_unique(self):
"""Test quat_mul method with different quaternions.
This test checks that the quaternion multiplication is consistent when using positive real quaternions
and regular quaternions. It makes sure that the result is the same regardless of the input quaternion sign
(i.e. q and -q are same quaternion in the context of rotations).
"""
quats_1 = math_utils.random_orientation(num=1024, device="cpu")
quats_2 = math_utils.random_orientation(num=1024, device="cpu")
# Make quats positive real
quats_1_pos_real = math_utils.quat_unique(quats_1)
quats_2_pos_real = math_utils.quat_unique(quats_2)
# Option 1: Direct computation on quaternions
quat_result_1 = math_utils.quat_mul(quats_1, math_utils.quat_conjugate(quats_2))
quat_result_1 = math_utils.quat_unique(quat_result_1)
# Option 2: Computation on positive real quaternions
quat_result_2 = math_utils.quat_mul(quats_1_pos_real, math_utils.quat_conjugate(quats_2_pos_real))
quat_result_2 = math_utils.quat_unique(quat_result_2)
# Option 3: Mixed computation
quat_result_3 = math_utils.quat_mul(quats_1, math_utils.quat_conjugate(quats_2_pos_real))
quat_result_3 = math_utils.quat_unique(quat_result_3)
# Check that the result is close to the expected value
torch.testing.assert_close(quat_result_1, quat_result_2)
torch.testing.assert_close(quat_result_2, quat_result_3)
torch.testing.assert_close(quat_result_3, quat_result_1)
def test_quat_error_mag_with_quat_unique(self):
"""Test quat_error_magnitude method with positive real quaternions."""
quats_1 = math_utils.random_orientation(num=1024, device="cpu")
quats_2 = math_utils.random_orientation(num=1024, device="cpu")
# Make quats positive real
quats_1_pos_real = math_utils.quat_unique(quats_1)
quats_2_pos_real = math_utils.quat_unique(quats_2)
# Compute the error
error_1 = math_utils.quat_error_magnitude(quats_1, quats_2)
error_2 = math_utils.quat_error_magnitude(quats_1_pos_real, quats_2_pos_real)
error_3 = math_utils.quat_error_magnitude(quats_1, quats_2_pos_real)
error_4 = math_utils.quat_error_magnitude(quats_1_pos_real, quats_2)
# Check that the error is close to the expected value
torch.testing.assert_close(error_1, error_2)
torch.testing.assert_close(error_2, error_3)
torch.testing.assert_close(error_3, error_4)
torch.testing.assert_close(error_4, error_1)
if __name__ == "__main__":
run_tests()
| 8257 | Python | 41.132653 | 114 | 0.622865 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_timer.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import time
import unittest
from omni.isaac.orbit.utils.timer import Timer
class TestTimer(unittest.TestCase):
"""Test fixture for the Timer class."""
def setUp(self):
# number of decimal places to check
self.precision_places = 2
def test_timer_as_object(self):
"""Test using a `Timer` as a regular object."""
timer = Timer()
timer.start()
self.assertAlmostEqual(0, timer.time_elapsed, self.precision_places)
time.sleep(1)
self.assertAlmostEqual(1, timer.time_elapsed, self.precision_places)
timer.stop()
self.assertAlmostEqual(1, timer.total_run_time, self.precision_places)
def test_timer_as_context_manager(self):
"""Test using a `Timer` as a context manager."""
with Timer() as timer:
self.assertAlmostEqual(0, timer.time_elapsed, self.precision_places)
time.sleep(1)
self.assertAlmostEqual(1, timer.time_elapsed, self.precision_places)
if __name__ == "__main__":
run_tests()
| 1567 | Python | 29.745097 | 95 | 0.679004 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_assets.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
import omni.isaac.orbit.utils.assets as assets_utils
class TestAssetsUtils(unittest.TestCase):
"""Test cases for the assets utility functions."""
def test_nucleus_connection(self):
"""Test checking the Nucleus connection."""
# check nucleus connection
self.assertIsNotNone(assets_utils.NUCLEUS_ASSET_ROOT_DIR)
def test_check_file_path_nucleus(self):
"""Test checking a file path on the Nucleus server."""
# robot file path
usd_path = f"{assets_utils.ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd"
# check file path
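        # NOTE (assumption): check_file_path is expected to return an integer code,
        # where 0 means the file was not found and 2 means it was resolved on the
        # Nucleus server; this test relies on the latter.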
self.assertEqual(assets_utils.check_file_path(usd_path), 2)
def test_check_file_path_invalid(self):
"""Test checking an invalid file path."""
# robot file path
usd_path = f"{assets_utils.ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_xyz.usd"
# check file path
self.assertEqual(assets_utils.check_file_path(usd_path), 0)
if __name__ == "__main__":
run_tests()
| 1436 | Python | 28.937499 | 102 | 0.690111 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/utils/test_configclass.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import copy
import os
import unittest
from collections.abc import Callable
from dataclasses import MISSING, asdict, field
from functools import wraps
from typing import ClassVar
from omni.isaac.orbit.utils.configclass import configclass
from omni.isaac.orbit.utils.dict import class_to_dict, update_class_from_dict
from omni.isaac.orbit.utils.io import dump_yaml, load_yaml
"""
Mock classes and functions.
"""
def dummy_function1() -> int:
"""Dummy function 1."""
return 1
def dummy_function2() -> int:
"""Dummy function 2."""
return 2
def dummy_wrapper(func):
"""Decorator for wrapping function."""
@wraps(func)
def wrapper():
return func() + 1
return wrapper
@dummy_wrapper
def wrapped_dummy_function3():
"""Dummy function 3."""
return 3
@dummy_wrapper
def wrapped_dummy_function4():
"""Dummy function 4."""
return 4
class DummyClass:
"""Dummy class."""
def __init__(self):
"""Initialize dummy class."""
self.a = 1
self.b = 2
"""
Dummy configuration: Basic
"""
def double(x):
"""Dummy function."""
return 2 * x
@configclass
class ViewerCfg:
    eye: list = [7.5, 7.5, 7.5]  # field(default_factory=...) missing on purpose
lookat: list = field(default_factory=lambda: [0.0, 0.0, 0.0])
@configclass
class EnvCfg:
num_envs: int = double(28) # uses function for assignment
episode_length: int = 2000
viewer: ViewerCfg = ViewerCfg()
@configclass
class RobotDefaultStateCfg:
pos = (0.0, 0.0, 0.0) # type annotation missing on purpose (immutable)
rot: tuple = (1.0, 0.0, 0.0, 0.0)
dof_pos: tuple = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
dof_vel = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # type annotation missing on purpose (mutable)
@configclass
class BasicDemoCfg:
"""Dummy configuration class."""
device_id: int = 0
env: EnvCfg = EnvCfg()
robot_default_state: RobotDefaultStateCfg = RobotDefaultStateCfg()
@configclass
class BasicDemoPostInitCfg:
"""Dummy configuration class."""
device_id: int = 0
env: EnvCfg = EnvCfg()
robot_default_state: RobotDefaultStateCfg = RobotDefaultStateCfg()
def __post_init__(self):
self.device_id = 1
self.add_variable = 3
"""
Dummy configuration to check type annotations ordering.
"""
@configclass
class TypeAnnotationOrderingDemoCfg:
"""Config class with type annotations."""
anymal: RobotDefaultStateCfg = RobotDefaultStateCfg()
unitree: RobotDefaultStateCfg = RobotDefaultStateCfg()
franka: RobotDefaultStateCfg = RobotDefaultStateCfg()
@configclass
class NonTypeAnnotationOrderingDemoCfg:
"""Config class without type annotations."""
anymal = RobotDefaultStateCfg()
unitree = RobotDefaultStateCfg()
franka = RobotDefaultStateCfg()
@configclass
class InheritedNonTypeAnnotationOrderingDemoCfg(NonTypeAnnotationOrderingDemoCfg):
"""Inherited config class without type annotations."""
pass
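# NOTE (informal): throughout these fixtures, @configclass is exercised as a
# dataclass-like decorator that adds helpers such as to_dict(), from_dict() and
# replace(), and that deep-copies mutable defaults so instances stay independent.
# Minimal usage sketch (assuming the classes defined above):
#   cfg = BasicDemoCfg()             # instantiate with defaults
#   cfg_dict = cfg.to_dict()         # nested dictionary view
#   cfg.from_dict({"device_id": 1})  # in-place update from a dictionary
#   cfg2 = cfg.replace(device_id=2)  # copy with overrides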
"""
Dummy configuration: Inheritance
"""
@configclass
class ParentDemoCfg:
"""Dummy parent configuration with missing fields."""
a: int = MISSING # add new missing field
b = 2 # type annotation missing on purpose
c: RobotDefaultStateCfg = MISSING # add new missing field
m: RobotDefaultStateCfg = RobotDefaultStateCfg() # Add class type with defaults
j: list[str] = MISSING # add new missing field
i: list[str] = MISSING # add new missing field
func: Callable = MISSING # add new missing field
@configclass
class ChildADemoCfg(ParentDemoCfg):
"""Dummy child configuration with missing fields."""
func = dummy_function1 # set default value for missing field
c = RobotDefaultStateCfg() # set default value for missing field
func_2: Callable = MISSING # add new missing field
d: int = MISSING # add new missing field
k: list[str] = ["c", "d"]
e: ViewerCfg = MISSING # add new missing field
dummy_class = DummyClass
def __post_init__(self):
self.b = 3 # change value of existing field
self.m.rot = (2.0, 0.0, 0.0, 0.0) # change value of default
self.i = ["a", "b"] # change value of existing field
@configclass
class ChildBDemoCfg(ParentDemoCfg):
"""Dummy child configuration to test inheritance across instances."""
a = 100 # set default value for missing field
j = ["3", "4"] # set default value for missing field
def __post_init__(self):
self.b = 8 # change value of existing field
self.i = ["1", "2"] # change value of existing field
@configclass
class ChildChildDemoCfg(ChildADemoCfg):
"""Dummy child configuration with missing fields."""
func_2 = dummy_function2
d = 2 # set default value for missing field
def __post_init__(self):
"""Post initialization function."""
super().__post_init__()
        self.b = 4  # change value of existing field
self.f = "new" # add new missing field
"""
Configuration with class inside.
"""
@configclass
class DummyClassCfg:
"""Dummy class configuration with class type."""
class_name_1: type = DummyClass
class_name_2: type[DummyClass] = DummyClass
class_name_3 = DummyClass
class_name_4: ClassVar[type[DummyClass]] = DummyClass
b: str = "dummy"
"""
Configuration with nested classes.
"""
@configclass
class OutsideClassCfg:
"""Outermost dummy configuration."""
@configclass
class InsideClassCfg:
"""Inner dummy configuration."""
@configclass
class InsideInsideClassCfg:
"""Dummy configuration with class type."""
u: list[int] = [1, 2, 3]
class_type: type = DummyClass
b: str = "dummy"
inside: InsideClassCfg = InsideClassCfg()
x: int = 20
def __post_init__(self):
self.inside.b = "dummy_changed"
"""
Dummy configuration: Functions
"""
@configclass
class FunctionsDemoCfg:
"""Dummy configuration class with functions as attributes."""
func = dummy_function1
wrapped_func = wrapped_dummy_function3
func_in_dict = {"func": dummy_function1}
@configclass
class FunctionImplementedDemoCfg:
"""Dummy configuration class with functions as attributes."""
func = dummy_function1
a: int = 5
k = 100.0
def set_a(self, a: int):
self.a = a
"""
Test solutions: Basic
"""
basic_demo_cfg_correct = {
"env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}},
"robot_default_state": {
"pos": (0.0, 0.0, 0.0),
"rot": (1.0, 0.0, 0.0, 0.0),
"dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
"dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
},
"device_id": 0,
}
basic_demo_cfg_change_correct = {
"env": {"num_envs": 22, "episode_length": 2000, "viewer": {"eye": (2.0, 2.0, 2.0), "lookat": [0.0, 0.0, 0.0]}},
"robot_default_state": {
"pos": (0.0, 0.0, 0.0),
"rot": (1.0, 0.0, 0.0, 0.0),
"dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
"dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
},
"device_id": 0,
}
basic_demo_post_init_cfg_correct = {
"env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}},
"robot_default_state": {
"pos": (0.0, 0.0, 0.0),
"rot": (1.0, 0.0, 0.0, 0.0),
"dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
"dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
},
"device_id": 1,
"add_variable": 3,
}
"""
Test solutions: Functions
"""
functions_demo_cfg_correct = {
"func": "__main__:dummy_function1",
"wrapped_func": "__main__:wrapped_dummy_function3",
"func_in_dict": {"func": "__main__:dummy_function1"},
}
functions_demo_cfg_for_updating = {
"func": "__main__:dummy_function2",
"wrapped_func": "__main__:wrapped_dummy_function4",
"func_in_dict": {"func": "__main__:dummy_function2"},
}
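# NOTE (informal): the "<module>:<name>" strings above reflect how callables are
# expected to be serialized by class_to_dict / update_class_from_dict, i.e. the
# function's module and name joined by a colon.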
"""
Test fixtures.
"""
class TestConfigClass(unittest.TestCase):
"""Test cases for various situations with configclass decorator for configuration."""
def test_str(self):
"""Test printing the configuration."""
cfg = BasicDemoCfg()
print()
print(cfg)
def test_str_dict(self):
"""Test printing the configuration using dataclass utility."""
cfg = BasicDemoCfg()
print()
print("Using dataclass function: ", asdict(cfg))
print("Using internal function: ", cfg.to_dict())
self.assertDictEqual(asdict(cfg), cfg.to_dict())
def test_dict_conversion(self):
"""Test dictionary conversion of configclass instance."""
cfg = BasicDemoCfg()
# dataclass function
self.assertDictEqual(asdict(cfg), basic_demo_cfg_correct)
self.assertDictEqual(asdict(cfg.env), basic_demo_cfg_correct["env"])
# utility function
self.assertDictEqual(class_to_dict(cfg), basic_demo_cfg_correct)
self.assertDictEqual(class_to_dict(cfg.env), basic_demo_cfg_correct["env"])
# internal function
self.assertDictEqual(cfg.to_dict(), basic_demo_cfg_correct)
self.assertDictEqual(cfg.env.to_dict(), basic_demo_cfg_correct["env"])
def test_dict_conversion_order(self):
"""Tests that order is conserved when converting to dictionary."""
true_outer_order = ["device_id", "env", "robot_default_state"]
true_env_order = ["num_envs", "episode_length", "viewer"]
# create config
cfg = BasicDemoCfg()
# check ordering
for label, parsed_value in zip(true_outer_order, cfg.__dict__.keys()):
self.assertEqual(label, parsed_value)
for label, parsed_value in zip(true_env_order, cfg.env.__dict__.keys()):
self.assertEqual(label, parsed_value)
# convert config to dictionary
cfg_dict = class_to_dict(cfg)
# check ordering
for label, parsed_value in zip(true_outer_order, cfg_dict.keys()):
self.assertEqual(label, parsed_value)
for label, parsed_value in zip(true_env_order, cfg_dict["env"].keys()):
self.assertEqual(label, parsed_value)
# check ordering when copied
cfg_dict_copied = copy.deepcopy(cfg_dict)
cfg_dict_copied.pop("robot_default_state")
# check ordering
for label, parsed_value in zip(true_outer_order, cfg_dict_copied.keys()):
self.assertEqual(label, parsed_value)
for label, parsed_value in zip(true_env_order, cfg_dict_copied["env"].keys()):
self.assertEqual(label, parsed_value)
def test_config_update_via_constructor(self):
"""Test updating configclass through initialization."""
cfg = BasicDemoCfg(env=EnvCfg(num_envs=22, viewer=ViewerCfg(eye=(2.0, 2.0, 2.0))))
self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)
def test_config_update_after_init(self):
"""Test updating configclass using instance members."""
cfg = BasicDemoCfg()
cfg.env.num_envs = 22
cfg.env.viewer.eye = (2.0, 2.0, 2.0) # note: changes from list to tuple
self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)
def test_config_update_dict(self):
"""Test updating configclass using dictionary."""
cfg = BasicDemoCfg()
cfg_dict = {"env": {"num_envs": 22, "viewer": {"eye": (2.0, 2.0, 2.0)}}}
update_class_from_dict(cfg, cfg_dict)
self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)
def test_config_update_dict_using_internal(self):
"""Test updating configclass from a dictionary using configclass method."""
cfg = BasicDemoCfg()
cfg_dict = {"env": {"num_envs": 22, "viewer": {"eye": (2.0, 2.0, 2.0)}}}
cfg.from_dict(cfg_dict)
self.assertDictEqual(cfg.to_dict(), basic_demo_cfg_change_correct)
def test_config_update_dict_using_post_init(self):
cfg = BasicDemoPostInitCfg()
self.assertDictEqual(cfg.to_dict(), basic_demo_post_init_cfg_correct)
def test_invalid_update_key(self):
"""Test invalid key update."""
cfg = BasicDemoCfg()
cfg_dict = {"env": {"num_envs": 22, "viewer": {"pos": (2.0, 2.0, 2.0)}}}
with self.assertRaises(KeyError):
update_class_from_dict(cfg, cfg_dict)
def test_multiple_instances(self):
"""Test multiple instances with twice instantiation."""
# create two config instances
cfg1 = BasicDemoCfg()
cfg2 = BasicDemoCfg()
# check variables
# mutable -- variables should be different
self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
self.assertNotEqual(id(cfg1.robot_default_state), id(cfg2.robot_default_state))
# immutable -- variables are the same
self.assertEqual(id(cfg1.robot_default_state.dof_pos), id(cfg2.robot_default_state.dof_pos))
self.assertEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
self.assertEqual(id(cfg1.device_id), id(cfg2.device_id))
# check values
self.assertDictEqual(cfg1.env.to_dict(), cfg2.env.to_dict())
self.assertDictEqual(cfg1.robot_default_state.to_dict(), cfg2.robot_default_state.to_dict())
def test_alter_values_multiple_instances(self):
"""Test alterations in multiple instances of the same configclass."""
# create two config instances
cfg1 = BasicDemoCfg()
cfg2 = BasicDemoCfg()
# alter configurations
cfg1.env.num_envs = 22 # immutable data: int
cfg1.env.viewer.eye[0] = 1.0 # mutable data: list
cfg1.env.viewer.lookat[2] = 12.0 # mutable data: list
# check variables
# values should be different
self.assertNotEqual(cfg1.env.num_envs, cfg2.env.num_envs)
self.assertNotEqual(cfg1.env.viewer.eye, cfg2.env.viewer.eye)
self.assertNotEqual(cfg1.env.viewer.lookat, cfg2.env.viewer.lookat)
# mutable -- variables are different ids
self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
# immutable -- altered variables are different ids
self.assertNotEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
def test_multiple_instances_with_replace(self):
"""Test multiple instances with creation through replace function."""
# create two config instances
cfg1 = BasicDemoCfg()
cfg2 = cfg1.replace()
# check variable IDs
# mutable -- variables should be different
self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
self.assertNotEqual(id(cfg1.robot_default_state), id(cfg2.robot_default_state))
# immutable -- variables are the same
self.assertEqual(id(cfg1.robot_default_state.dof_pos), id(cfg2.robot_default_state.dof_pos))
self.assertEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
self.assertEqual(id(cfg1.device_id), id(cfg2.device_id))
# check values
self.assertDictEqual(cfg1.to_dict(), cfg2.to_dict())
def test_alter_values_multiple_instances_wth_replace(self):
"""Test alterations in multiple instances through replace function."""
# create two config instances
cfg1 = BasicDemoCfg()
cfg2 = cfg1.replace(device_id=1)
# alter configurations
cfg1.env.num_envs = 22 # immutable data: int
cfg1.env.viewer.eye[0] = 1.0 # mutable data: list
cfg1.env.viewer.lookat[2] = 12.0 # mutable data: list
# check variables
# values should be different
self.assertNotEqual(cfg1.env.num_envs, cfg2.env.num_envs)
self.assertNotEqual(cfg1.env.viewer.eye, cfg2.env.viewer.eye)
self.assertNotEqual(cfg1.env.viewer.lookat, cfg2.env.viewer.lookat)
# mutable -- variables are different ids
self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
# immutable -- altered variables are different ids
self.assertNotEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
self.assertNotEqual(id(cfg1.device_id), id(cfg2.device_id))
def test_configclass_type_ordering(self):
"""Checks ordering of config objects when no type annotation is provided."""
cfg_1 = TypeAnnotationOrderingDemoCfg()
cfg_2 = NonTypeAnnotationOrderingDemoCfg()
cfg_3 = InheritedNonTypeAnnotationOrderingDemoCfg()
# check ordering
self.assertEqual(list(cfg_1.__dict__.keys()), list(cfg_2.__dict__.keys()))
self.assertEqual(list(cfg_3.__dict__.keys()), list(cfg_2.__dict__.keys()))
self.assertEqual(list(cfg_1.__dict__.keys()), list(cfg_3.__dict__.keys()))
def test_functions_config(self):
"""Tests having functions as values in the configuration instance."""
cfg = FunctionsDemoCfg()
# check types
self.assertEqual(cfg.__annotations__["func"], type(dummy_function1))
self.assertEqual(cfg.__annotations__["wrapped_func"], type(wrapped_dummy_function3))
self.assertEqual(cfg.__annotations__["func_in_dict"], dict)
# check calling
self.assertEqual(cfg.func(), 1)
self.assertEqual(cfg.wrapped_func(), 4)
self.assertEqual(cfg.func_in_dict["func"](), 1)
def test_function_impl_config(self):
cfg = FunctionImplementedDemoCfg()
# change value
self.assertEqual(cfg.a, 5)
cfg.set_a(10)
self.assertEqual(cfg.a, 10)
def test_dict_conversion_functions_config(self):
"""Tests conversion of config with functions into dictionary."""
cfg = FunctionsDemoCfg()
cfg_dict = class_to_dict(cfg)
self.assertEqual(cfg_dict["func"], functions_demo_cfg_correct["func"])
self.assertEqual(cfg_dict["wrapped_func"], functions_demo_cfg_correct["wrapped_func"])
self.assertEqual(cfg_dict["func_in_dict"]["func"], functions_demo_cfg_correct["func_in_dict"]["func"])
def test_update_functions_config_with_functions(self):
"""Tests updating config with functions."""
cfg = FunctionsDemoCfg()
# update config
update_class_from_dict(cfg, functions_demo_cfg_for_updating)
# check calling
self.assertEqual(cfg.func(), 2)
self.assertEqual(cfg.wrapped_func(), 5)
self.assertEqual(cfg.func_in_dict["func"](), 2)
def test_missing_type_in_config(self):
"""Tests missing type annotation in config.
Should complain that 'c' is missing type annotation since it cannot be inferred
from 'MISSING' value.
"""
with self.assertRaises(TypeError):
@configclass
class MissingTypeDemoCfg:
a: int = 1
b = 2
c = MISSING
def test_missing_default_value_in_config(self):
"""Tests missing default value in config.
Should complain that 'a' is missing default value since it cannot be inferred
from type annotation.
"""
with self.assertRaises(ValueError):
@configclass
class MissingTypeDemoCfg:
a: int
b = 2
def test_required_argument_for_missing_type_in_config(self):
"""Tests required positional argument for missing type annotation in config creation."""
@configclass
class MissingTypeDemoCfg:
a: int = 1
b = 2
c: int = MISSING
        # should complain that 'c' is missing from the positional arguments
# TODO: Uncomment this when we move to 3.10.
# with self.assertRaises(TypeError):
# cfg = MissingTypeDemoCfg(a=1)
# should not complain
cfg = MissingTypeDemoCfg(a=1, c=3)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, 2)
def test_config_inheritance(self):
"""Tests that inheritance works properly."""
# check variables
cfg_a = ChildADemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])
self.assertEqual(cfg_a.func, dummy_function1)
self.assertEqual(cfg_a.a, 20)
self.assertEqual(cfg_a.d, 3)
self.assertEqual(cfg_a.j, ["c", "d"])
# check post init
self.assertEqual(cfg_a.b, 3)
self.assertEqual(cfg_a.i, ["a", "b"])
self.assertEqual(cfg_a.m.rot, (2.0, 0.0, 0.0, 0.0))
def test_config_inheritance_independence(self):
"""Tests that subclass instantions have fully unique members,
rather than references to members of the parent class"""
# instantiate two classes which inherit from a shared parent,
# but which will differently modify their members in their
# __init__ and __post_init__
cfg_a = ChildADemoCfg()
cfg_b = ChildBDemoCfg()
# Test various combinations of initialization
# and defaults across inherited members in
# instances to verify independence between the subclasses
self.assertIsInstance(cfg_a.a, type(MISSING))
self.assertEqual(cfg_b.a, 100)
self.assertEqual(cfg_a.b, 3)
self.assertEqual(cfg_b.b, 8)
self.assertEqual(cfg_a.c, RobotDefaultStateCfg())
self.assertIsInstance(cfg_b.c, type(MISSING))
self.assertEqual(cfg_a.m.rot, (2.0, 0.0, 0.0, 0.0))
self.assertEqual(cfg_b.m.rot, (1.0, 0.0, 0.0, 0.0))
self.assertIsInstance(cfg_a.j, type(MISSING))
self.assertEqual(cfg_b.j, ["3", "4"])
self.assertEqual(cfg_a.i, ["a", "b"])
self.assertEqual(cfg_b.i, ["1", "2"])
self.assertEqual(cfg_a.func, dummy_function1)
self.assertIsInstance(cfg_b.func, type(MISSING))
# Explicitly assert that members are not the same object
# for different levels and kinds of data types
self.assertIsNot(cfg_a.m, cfg_b.m)
self.assertIsNot(cfg_a.m.rot, cfg_b.m.rot)
self.assertIsNot(cfg_a.i, cfg_b.i)
self.assertIsNot(cfg_a.b, cfg_b.b)
def test_config_double_inheritance(self):
"""Tests that inheritance works properly when inheriting twice."""
# check variables
cfg = ChildChildDemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])
self.assertEqual(cfg.func, dummy_function1)
self.assertEqual(cfg.func_2, dummy_function2)
self.assertEqual(cfg.a, 20)
self.assertEqual(cfg.d, 3)
self.assertEqual(cfg.j, ["c", "d"])
# check post init
self.assertEqual(cfg.b, 4)
self.assertEqual(cfg.f, "new")
self.assertEqual(cfg.i, ["a", "b"])
def test_config_with_class_type(self):
"""Tests that configclass works properly with class type."""
cfg = DummyClassCfg()
        # annotations may be stored as strings (e.g. under PEP 563 / `from __future__ import annotations`),
        # so evaluate any string annotations before comparing
annotations = {k: eval(v) if isinstance(v, str) else v for k, v in cfg.__annotations__.items()}
# check types
self.assertEqual(annotations["class_name_1"], type)
self.assertEqual(annotations["class_name_2"], type[DummyClass])
self.assertEqual(annotations["class_name_3"], type[DummyClass])
self.assertEqual(annotations["class_name_4"], ClassVar[type[DummyClass]])
# check values
self.assertEqual(cfg.class_name_1, DummyClass)
self.assertEqual(cfg.class_name_2, DummyClass)
self.assertEqual(cfg.class_name_3, DummyClass)
self.assertEqual(cfg.class_name_4, DummyClass)
self.assertEqual(cfg.b, "dummy")
def test_nested_config_class_declarations(self):
"""Tests that configclass works properly with nested class class declarations."""
cfg = OutsideClassCfg()
# check types
self.assertNotIn("InsideClassCfg", cfg.__annotations__)
self.assertNotIn("InsideClassCfg", OutsideClassCfg.__annotations__)
self.assertNotIn("InsideInsideClassCfg", OutsideClassCfg.InsideClassCfg.__annotations__)
self.assertNotIn("InsideInsideClassCfg", cfg.inside.__annotations__)
# check values
self.assertEqual(cfg.inside.class_type, DummyClass)
self.assertEqual(cfg.inside.b, "dummy_changed")
self.assertEqual(cfg.x, 20)
def test_config_dumping(self):
"""Check that config dumping works properly."""
# file for dumping
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "output", "configclass", "test_config.yaml")
# create config
cfg = ChildADemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])
# save config
dump_yaml(filename, cfg)
# load config
cfg_loaded = load_yaml(filename)
# check dictionaries are the same
self.assertEqual(list(cfg.to_dict().keys()), list(cfg_loaded.keys()))
self.assertDictEqual(cfg.to_dict(), cfg_loaded)
        # saving the config with sorted keys re-orders the dictionary, so the loaded
        # key order will no longer match the config's insertion order (values still match)
        # save config
dump_yaml(filename, cfg, sort_keys=True)
# load config
cfg_loaded = load_yaml(filename)
# check dictionaries are the same
self.assertNotEqual(list(cfg.to_dict().keys()), list(cfg_loaded.keys()))
self.assertDictEqual(cfg.to_dict(), cfg_loaded)
if __name__ == "__main__":
run_tests()
| 26042 | Python | 33.448413 | 115 | 0.63244 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/assets/check_fixed_base_assets.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates fixed-base API for different robots.
.. code-block:: bash
# Usage
./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_fixed_base_assets.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script checks the fixed-base API for different robots.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
##
# Pre-defined configs
##
from omni.isaac.orbit_assets import ANYMAL_C_CFG, FRANKA_PANDA_CFG # isort:skip
def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
"""Defines the origins of the the scene."""
# create tensor based on number of environments
env_origins = torch.zeros(num_origins, 3)
# create a grid of origins
num_cols = np.floor(np.sqrt(num_origins))
num_rows = np.ceil(num_origins / num_cols)
xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
env_origins[:, 2] = 0.0
# return the origins
return env_origins.tolist()
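# Worked example: with num_origins=4 and spacing=2.0, num_cols = floor(sqrt(4)) = 2
# and num_rows = ceil(4 / 2) = 2, so the origins form a centered 2x2 grid with
# corners at (+/-1.0, +/-1.0, 0.0).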
def design_scene() -> tuple[dict, list[list[float]]]:
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a robot placed at its origin
origins = define_origins(num_origins=4, spacing=2.0)
# Origin 1 with Franka Panda
prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
# -- Robot
franka = Articulation(FRANKA_PANDA_CFG.replace(prim_path="/World/Origin1/Robot"))
# Origin 2 with Anymal C
prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
# -- Robot
robot_cfg = ANYMAL_C_CFG.replace(prim_path="/World/Origin2/Robot")
robot_cfg.spawn.articulation_props.fix_root_link = True
anymal_c = Articulation(robot_cfg)
# return the scene information
scene_entities = {
"franka": franka,
"anymal_c": anymal_c,
}
return scene_entities, origins
def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
"""Runs the simulation loop."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 200 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset robots
for index, robot in enumerate(entities.values()):
# root state
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
root_state[:, :2] += torch.randn_like(root_state[:, :2]) * 0.25
robot.write_root_state_to_sim(root_state)
# joint state
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset the internal state
robot.reset()
print("[INFO]: Resetting robots state...")
# apply default actions to the quadrupedal robots
for name, robot in entities.items():
if count % 200 == 0:
print("Name: ", name, "is_fixed_base: ", robot.is_fixed_base)
# generate random joint positions
joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
# apply action to the robot
robot.set_joint_position_target(joint_pos_target)
# write data to sim
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in entities.values():
robot.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
# Set main camera
sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# design scene
scene_entities, scene_origins = design_scene()
scene_origins = torch.tensor(scene_origins, device=sim.device)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene_entities, scene_origins)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5552 | Python | 32.451807 | 113 | 0.634906 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/assets/check_ridgeback_franka.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to simulate a mobile manipulator.
.. code-block:: bash
# Usage
./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_ridgeback_franka.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script demonstrates how to simulate a mobile manipulator with dummy joints."
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.ridgeback_franka import RIDGEBACK_FRANKA_PANDA_CFG # isort:skip
def design_scene():
"""Designs the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# add robots and return them
return add_robots()
def add_robots() -> Articulation:
"""Adds robots to the scene."""
robot_cfg = RIDGEBACK_FRANKA_PANDA_CFG
# -- Spawn robot
robot_cfg.spawn.func("/World/Robot_1", robot_cfg.spawn, translation=(0.0, -1.0, 0.0))
robot_cfg.spawn.func("/World/Robot_2", robot_cfg.spawn, translation=(0.0, 1.0, 0.0))
# -- Create interface
robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot.*"))
return robot
def run_simulator(sim: sim_utils.SimulationContext, robot: Articulation):
"""Runs the simulator by applying actions to the robot at every time-step"""
# dummy action
actions = robot.data.default_joint_pos.clone()
# Define simulation stepping
sim_dt = sim.get_physics_dt()
# episode counter
sim_time = 0.0
ep_step_count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if ep_step_count % 1000 == 0:
# reset counters
sim_time = 0.0
ep_step_count = 0
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset internals
robot.reset()
# reset command
actions = torch.rand_like(robot.data.default_joint_pos) + robot.data.default_joint_pos
# -- base
actions[:, 0:3] = 0.0
# -- gripper
actions[:, -2:] = 0.04
print("[INFO]: Resetting robots state...")
# change the gripper action
if ep_step_count % 200 == 0:
# flip command for the gripper
actions[:, -2:] = 0.0 if actions[0, -2] > 0.0 else 0.04
# change the base action
# -- forward and backward (x-axis)
if ep_step_count == 200:
actions[:, :3] = 0.0
actions[:, 0] = 1.0
if ep_step_count == 300:
actions[:, :3] = 0.0
actions[:, 0] = -1.0
# -- right and left (y-axis)
if ep_step_count == 400:
actions[:, :3] = 0.0
actions[:, 1] = 1.0
if ep_step_count == 500:
actions[:, :3] = 0.0
actions[:, 1] = -1.0
# -- turn right and left (z-axis)
if ep_step_count == 600:
actions[:, :3] = 0.0
actions[:, 2] = 1.0
if ep_step_count == 700:
actions[:, :3] = 0.0
actions[:, 2] = -1.0
if ep_step_count == 900:
actions[:, :3] = 0.0
actions[:, 2] = 1.0
        # change the arm action (re-sample the arm pose every 100 steps)
        if ep_step_count % 100 == 0:
actions[:, 3:10] = torch.rand(robot.num_instances, 7, device=robot.device)
actions[:, 3:10] += robot.data.default_joint_pos[:, 3:10]
# apply action
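        # -- the first three "dummy" joints emulate the holonomic base (x, y and yaw)
        #    and are velocity-controlled; the arm and gripper joints are position-controlled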
robot.set_joint_velocity_target(actions[:, :3], joint_ids=[0, 1, 2])
robot.set_joint_position_target(actions[:, 3:], joint_ids=[3, 4, 5, 6, 7, 8, 9, 10, 11])
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
ep_step_count += 1
# update buffers
robot.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg())
# Set main camera
sim.set_camera_view([1.5, 1.5, 1.5], [0.0, 0.0, 0.0])
# design scene
robot = design_scene()
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, robot)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5157 | Python | 29.702381 | 109 | 0.587357 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/assets/test_rigid_object.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# Can set this to False to see the GUI for debugging
# This will also add lights to the scene
HEADLESS = True
# launch omniverse app
app_launcher = AppLauncher(headless=HEADLESS)
simulation_app = app_launcher.app
"""Rest everything follows."""
import ctypes
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sim import build_simulation_context
from omni.isaac.orbit.sim.spawners import materials
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import default_orientation, random_orientation
def generate_cubes_scene(
    num_cubes: int = 1, height: float = 1.0, has_api: bool = True, kinematic_enabled: bool = False, device: str = "cuda:0"
) -> tuple[RigidObject, torch.Tensor]:
"""Generate a scene with the provided number of cubes.
Args:
num_cubes: Number of cubes to generate.
height: Height of the cubes.
has_api: Whether the cubes have a rigid body API on them.
kinematic_enabled: Whether the cubes are kinematic.
device: Device to use for the simulation.
Returns:
RigidObject: The rigid object representing the cubes.
origins: The origins of the cubes.
"""
origins = torch.tensor([(i * 1.0, 0, height) for i in range(num_cubes)]).to(device)
# Create Top-level Xforms, one for each cube
for i, origin in enumerate(origins):
prim_utils.create_prim(f"/World/Table_{i}", "Xform", translation=origin)
# Resolve spawn configuration
if has_api:
spawn_cfg = sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=kinematic_enabled),
)
else:
# since no rigid body properties defined, this is just a static collider
spawn_cfg = sim_utils.CuboidCfg(
size=(0.1, 0.1, 0.1),
collision_props=sim_utils.CollisionPropertiesCfg(),
)
# Create rigid object
cube_object_cfg = RigidObjectCfg(
prim_path="/World/Table_.*/Object",
spawn=spawn_cfg,
init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, height)),
)
cube_object = RigidObject(cfg=cube_object_cfg)
return cube_object, origins
class TestRigidObject(unittest.TestCase):
"""Test for rigid object class."""
"""
Tests
"""
def test_initialization(self):
"""Test initialization for prim with rigid body API at the provided prim path."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device)
# Check that boundedness of rigid object is correct
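                        # (this reads the CPython reference count of the Python object via ctypes;
                        # a value of 1 means no other references to it are held)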
self.assertEqual(ctypes.c_long.from_address(id(cube_object)).value, 1)
# Play sim
sim.reset()
# Check if object is initialized
self.assertTrue(cube_object._is_initialized)
self.assertEqual(len(cube_object.body_names), 1)
# Check buffers that exists and have correct shapes
self.assertEqual(cube_object.data.root_pos_w.shape, (num_cubes, 3))
self.assertEqual(cube_object.data.root_quat_w.shape, (num_cubes, 4))
# Simulate physics
for _ in range(2):
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
def test_initialization_with_kinematic_enabled(self):
"""Test that initialization for prim with kinematic flag enabled."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
cube_object, origins = generate_cubes_scene(
num_cubes=num_cubes, kinematic_enabled=True, device=device
)
# Check that boundedness of rigid object is correct
self.assertEqual(ctypes.c_long.from_address(id(cube_object)).value, 1)
# Play sim
sim.reset()
# Check if object is initialized
self.assertTrue(cube_object._is_initialized)
self.assertEqual(len(cube_object.body_names), 1)
# Check buffers that exists and have correct shapes
self.assertEqual(cube_object.data.root_pos_w.shape, (num_cubes, 3))
self.assertEqual(cube_object.data.root_quat_w.shape, (num_cubes, 4))
# Simulate physics
for _ in range(2):
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
# check that the object is kinematic
default_root_state = cube_object.data.default_root_state.clone()
default_root_state[:, :3] += origins
                        torch.testing.assert_close(cube_object.data.root_state_w, default_root_state)
def test_initialization_with_no_rigid_body(self):
"""Test that initialization fails when no rigid body is found at the provided prim path."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, has_api=False, device=device)
# Check that boundedness of rigid object is correct
self.assertEqual(ctypes.c_long.from_address(id(cube_object)).value, 1)
# Play sim
sim.reset()
# Check if object is initialized
self.assertFalse(cube_object._is_initialized)
def test_external_force_on_single_body(self):
"""Test application of external force on the base of the object.
        In this test, we apply an upward force equal to the object's weight to the base of
        every second object. We check that those objects do not move. To the remaining
        objects, we apply no force and check that they fall down.
"""
for num_cubes in (2, 4):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
cube_object, origins = generate_cubes_scene(num_cubes=num_cubes, device=device)
# Play the simulator
sim.reset()
# Find bodies to apply the force
body_ids, body_names = cube_object.find_bodies(".*")
# Sample a force equal to the weight of the object
external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device)
# Every 2nd cube should have a force applied to it
external_wrench_b[0::2, :, 2] = 9.81 * cube_object.root_physx_view.get_masses()[0]
# Now we are ready!
for _ in range(5):
# reset root state
root_state = cube_object.data.default_root_state.clone()
# need to shift the position of the cubes otherwise they will be on top of each other
root_state[:, :3] = origins
cube_object.write_root_state_to_sim(root_state)
# reset object
cube_object.reset()
# apply force
cube_object.set_external_force_and_torque(
external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
)
# perform simulation
for _ in range(5):
# apply action to the object
cube_object.write_data_to_sim()
# perform step
sim.step()
# update buffers
cube_object.update(sim.cfg.dt)
                            # Every second object (with the force applied) should still be at Z position 1.0
                            torch.testing.assert_close(
                                cube_object.data.root_pos_w[0::2, 2], torch.ones(num_cubes // 2, device=sim.device)
                            )
                            # The remaining objects should have fallen, so their Z height should be less than the
                            # initial height of 1.0
                            self.assertTrue(torch.all(cube_object.data.root_pos_w[1::2, 2] < 1.0))
def test_set_rigid_object_state(self):
"""Test setting the state of the rigid object.
In this test, we set the state of the rigid object to a random state and check
that the object is in that state after simulation. We set gravity to zero as
we don't want any external forces acting on the object to ensure state remains static.
"""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
# Turn off gravity for this test as we don't want any external forces acting on the object
# to ensure state remains static
with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device)
# Play the simulator
sim.reset()
state_types = ["root_pos_w", "root_quat_w", "root_lin_vel_w", "root_ang_vel_w"]
# Set each state type individually as they are dependent on each other
for state_type_to_randomize in state_types:
state_dict = {
"root_pos_w": torch.zeros_like(cube_object.data.root_pos_w, device=sim.device),
"root_quat_w": default_orientation(num=num_cubes, device=sim.device),
"root_lin_vel_w": torch.zeros_like(cube_object.data.root_lin_vel_w, device=sim.device),
"root_ang_vel_w": torch.zeros_like(cube_object.data.root_ang_vel_w, device=sim.device),
}
# Now we are ready!
for _ in range(5):
# reset object
cube_object.reset()
# Set random state
if state_type_to_randomize == "root_quat_w":
state_dict[state_type_to_randomize] = random_orientation(
num=num_cubes, device=sim.device
)
else:
state_dict[state_type_to_randomize] = torch.randn(num_cubes, 3, device=sim.device)
# perform simulation
for _ in range(5):
root_state = torch.cat(
[
state_dict["root_pos_w"],
state_dict["root_quat_w"],
state_dict["root_lin_vel_w"],
state_dict["root_ang_vel_w"],
],
dim=-1,
)
# reset root state
cube_object.write_root_state_to_sim(root_state=root_state)
sim.step()
# assert that set root quantities are equal to the ones set in the state_dict
for key, expected_value in state_dict.items():
value = getattr(cube_object.data, key)
torch.testing.assert_close(value, expected_value, rtol=1e-5, atol=1e-5)
cube_object.update(sim.cfg.dt)
def test_reset_rigid_object(self):
"""Test resetting the state of the rigid object."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, gravity_enabled=True, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device)
# Play the simulator
sim.reset()
for i in range(5):
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
# Move the object to a random position
root_state = cube_object.data.default_root_state.clone()
root_state[:, :3] = torch.randn(num_cubes, 3, device=sim.device)
# Random orientation
root_state[:, 3:7] = random_orientation(num=num_cubes, device=sim.device)
cube_object.write_root_state_to_sim(root_state)
if i % 2 == 0:
# reset object
cube_object.reset()
# Reset should zero external forces and torques and set last body velocity to zero
self.assertFalse(cube_object.has_external_wrench)
self.assertEqual(torch.count_nonzero(cube_object._external_force_b), 0)
self.assertEqual(torch.count_nonzero(cube_object._external_torque_b), 0)
self.assertEqual(torch.count_nonzero(cube_object._last_body_vel_w), 0)
def test_rigid_body_set_material_properties(self):
"""Test getting and setting material properties of rigid object."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(
device=device, gravity_enabled=True, add_ground_plane=True, auto_add_lighting=True
) as sim:
# Create rigid object(s)
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device)
# Play sim
sim.reset()
# Set material properties
static_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8)
dynamic_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8)
restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)
materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)
indices = torch.tensor(range(num_cubes), dtype=torch.int)
# Add friction to cube
cube_object.root_physx_view.set_material_properties(materials, indices)
# Simulate physics
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
# Get material properties
materials_to_check = cube_object.root_physx_view.get_material_properties()
# Check if material properties are set correctly
torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials)
def test_rigid_body_no_friction(self):
"""Test that a rigid object with no friction will maintain it's velocity when sliding across a plane."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device)
# Create ground plane with no friction
cfg = sim_utils.GroundPlaneCfg(
physics_material=materials.RigidBodyMaterialCfg(
static_friction=0.0,
dynamic_friction=0.0,
restitution=0.0,
)
)
cfg.func("/World/GroundPlane", cfg)
# Play sim
sim.reset()
# Set material friction properties to be all zero
static_friction = torch.zeros(num_cubes, 1)
dynamic_friction = torch.zeros(num_cubes, 1)
restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)
cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)
indices = torch.tensor(range(num_cubes), dtype=torch.int)
cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)
# Set initial velocity
# Initial velocity in X to get the block moving
initial_velocity = torch.zeros((num_cubes, 6), device=sim.cfg.device)
initial_velocity[:, 0] = 0.1
cube_object.write_root_velocity_to_sim(initial_velocity)
# Simulate physics
for _ in range(5):
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
# Non-deterministic when on GPU, so we use different tolerances
if device == "cuda:0":
tolerance = 1e-2
else:
tolerance = 1e-5
torch.testing.assert_close(
cube_object.data.root_lin_vel_w, initial_velocity[:, :3], rtol=1e-5, atol=tolerance
)
def test_rigid_body_with_static_friction(self):
"""Test that static friction applied to rigid object works as expected.
        This test works by applying a horizontal force to the object and checking whether it moves,
        based on the coefficient of static friction (mu) set for the object. We set the static friction
        to be non-zero and apply a force to the object. When the applied force is below the static-friction
        limit (mu times the normal force), the object should not move; above the limit, it should move.
"""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.03125, device=device)
# Create ground plane with no friction
cfg = sim_utils.GroundPlaneCfg(
physics_material=materials.RigidBodyMaterialCfg(
static_friction=0.0,
dynamic_friction=0.0,
)
)
cfg.func("/World/GroundPlane", cfg)
# Play sim
sim.reset()
# Set static friction to be non-zero
static_friction_coefficient = 0.5
static_friction = torch.Tensor([[static_friction_coefficient]] * num_cubes)
dynamic_friction = torch.zeros(num_cubes, 1)
restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)
cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)
indices = torch.tensor(range(num_cubes), dtype=torch.int)
# Add friction to cube
cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)
                        # 2 cases: applied force below and above the static-friction limit
                        # below the limit: the block should not move
                        # above the limit: the block should move
for force in "below_mu", "above_mu":
with self.subTest(force=force):
external_wrench_b = torch.zeros((num_cubes, 1, 6), device=sim.device)
if force == "below_mu":
external_wrench_b[:, 0, 0] = static_friction_coefficient * 0.999
else:
external_wrench_b[:, 0, 0] = static_friction_coefficient * 1.001
cube_object.set_external_force_and_torque(
external_wrench_b[..., :3],
external_wrench_b[..., 3:],
)
# Get root state
initial_root_state = cube_object.data.root_state_w
# Simulate physics
for _ in range(10):
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
if force == "below_mu":
# Assert that the block has not moved
torch.testing.assert_close(
cube_object.data.root_state_w, initial_root_state, rtol=1e-5, atol=1e-5
)
                                else:
                                    # Assert that the block has moved away from its initial state
                                    self.assertFalse(
                                        torch.allclose(cube_object.data.root_state_w, initial_root_state, rtol=1e-5, atol=1e-5)
                                    )
def test_rigid_body_with_restitution(self):
"""Test that restitution when applied to rigid object works as expected.
This test works by dropping a block from a height and checking if the block bounces or not based on the
restitution value set for the object. We set the restitution to be non-zero and drop the block from a height.
When the restitution is 0, the block should not bounce. When the restitution is 1, the block should bounce
with the same energy. When the restitution is between 0 and 1, the block should bounce with less energy.
"""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0, device=device)
# Create ground plane such that has a restitution of 1.0 (perfectly elastic collision)
cfg = sim_utils.GroundPlaneCfg(
physics_material=materials.RigidBodyMaterialCfg(
restitution=1.0,
)
)
cfg.func("/World/GroundPlane", cfg)
indices = torch.tensor(range(num_cubes), dtype=torch.int)
# Play sim
sim.reset()
# 3 cases: inelastic, partially elastic, elastic
                        # inelastic: restitution = 0, block should not bounce
# partially elastic: 0 <= restitution <= 1, block should bounce with less energy
# elastic: restitution = 1, block should bounce with same energy
for expected_collision_type in "inelastic", "partially_elastic", "elastic":
root_state = torch.zeros(1, 13, device=sim.device)
root_state[0, 3] = 1.0 # To make orientation a quaternion
root_state[0, 2] = 0.1 # Set an initial drop height
root_state[0, 9] = -1.0 # Set an initial downward velocity
cube_object.write_root_state_to_sim(root_state=root_state)
prev_z_velocity = 0.0
curr_z_velocity = 0.0
with self.subTest(expected_collision_type=expected_collision_type):
# cube_object.reset()
# Set static friction to be non-zero
if expected_collision_type == "inelastic":
restitution_coefficient = 0.0
elif expected_collision_type == "partially_elastic":
restitution_coefficient = 0.5
else:
restitution_coefficient = 1.0
static_friction = torch.zeros(num_cubes, 1)
dynamic_friction = torch.zeros(num_cubes, 1)
restitution = torch.Tensor([[restitution_coefficient]] * num_cubes)
cube_object_materials = torch.cat(
[static_friction, dynamic_friction, restitution], dim=-1
)
# Apply the material properties to the cube
cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)
curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2]
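# step until the cube's vertical velocity turns positive, i.e. the first rebound off the ground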
while torch.all(curr_z_velocity <= 0.0):
# Simulate physics
curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2]
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
if torch.all(curr_z_velocity <= 0.0):
# Still in the air
prev_z_velocity = curr_z_velocity
# We have made contact with the ground and can verify expected collision type
# based on how velocity has changed after the collision
if expected_collision_type == "inelastic":
# Assert that the rebound is weak: the floor's restitution of 1.0 means it still bounces back
# an object whose own restitution is 0.0, so we check that half the rebound z velocity does
# not exceed the impact speed
self.assertTrue(torch.all(torch.le(curr_z_velocity / 2, abs(prev_z_velocity))))
elif expected_collision_type == "partially_elastic":
# Assert that the block has lost some energy: the rebound z velocity is at most the impact speed
self.assertTrue(torch.all(torch.le(abs(curr_z_velocity), abs(prev_z_velocity))))
elif expected_collision_type == "elastic":
# Assert that the block has not lost any energy by checking that the z velocity is the same
torch.testing.assert_close(abs(curr_z_velocity), abs(prev_z_velocity))
def test_rigid_body_set_mass(self):
"""Test getting and setting mass of rigid object."""
for num_cubes in (1, 2):
for device in ("cuda:0", "cpu"):
with self.subTest(num_cubes=num_cubes, device=device):
with build_simulation_context(
device=device, gravity_enabled=False, add_ground_plane=True, auto_add_lighting=True
) as sim:
cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0, device=device)
# Play sim
sim.reset()
# Get masses before increasing
original_masses = cube_object.root_physx_view.get_masses()
self.assertEqual(original_masses.shape, (num_cubes, 1))
# Randomize the mass of each cube by adding a uniform offset in [4, 8]
masses = original_masses + torch.FloatTensor(num_cubes, 1).uniform_(4, 8)
indices = torch.tensor(range(num_cubes), dtype=torch.int)
# Set the new masses into the simulation
cube_object.root_physx_view.set_masses(masses, indices)
torch.testing.assert_close(cube_object.root_physx_view.get_masses(), masses)
# Simulate physics
# perform rendering
sim.step()
# update object
cube_object.update(sim.cfg.dt)
masses_to_check = cube_object.root_physx_view.get_masses()
# Check if mass is set correctly
torch.testing.assert_close(masses, masses_to_check)
if __name__ == "__main__":
run_tests()
| 32,401 | Python | 50.107255 | 134 | 0.493503 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/assets/test_articulation.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import ctypes
import torch
import unittest
import omni.isaac.core.utils.stage as stage_utils
import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import Articulation, ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
##
# Pre-defined configs
##
from omni.isaac.orbit_assets import ANYMAL_C_CFG, FRANKA_PANDA_CFG, SHADOW_HAND_CFG # isort:skip
class TestArticulation(unittest.TestCase):
"""Test for articulation class."""
def setUp(self):
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.005
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt, device="cuda:0")
self.sim = sim_utils.SimulationContext(sim_cfg)
def tearDown(self):
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
# clear the stage
self.sim.clear_instance()
"""
Tests
"""
def test_initialization_floating_base_non_root(self):
"""Test initialization for a floating-base with articulation root on a rigid body
under the provided prim path."""
# Create articulation
robot_cfg = ArticulationCfg(
prim_path="/World/Robot",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=0.0, damping=0.0)},
)
robot = Articulation(cfg=robot_cfg)
# Check that boundedness of articulation is correct
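# note: `ctypes.c_long.from_address(id(obj))` reads CPython's reference count from the start of the
# object's memory, so a value of 1 means no other references to the articulation are held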
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that floating base
self.assertFalse(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 21))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Check that the body_physx_view is deprecated
with self.assertWarns(DeprecationWarning):
robot.body_physx_view
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
def test_initialization_floating_base(self):
"""Test initialization for a floating-base with articulation root on provided prim path."""
# Create articulation
robot = Articulation(cfg=ANYMAL_C_CFG.replace(prim_path="/World/Robot"))
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that floating base
self.assertFalse(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 12))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Check that the body_physx_view is deprecated
with self.assertWarns(DeprecationWarning):
robot.body_physx_view
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
def test_initialization_fixed_base(self):
"""Test initialization for fixed base."""
# Create articulation
robot = Articulation(cfg=FRANKA_PANDA_CFG.replace(prim_path="/World/Robot"))
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that fixed base
self.assertTrue(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 9))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Check that the body_physx_view is deprecated
with self.assertWarns(DeprecationWarning):
robot.body_physx_view
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
# check that the root is at the correct state
default_root_state = robot.data.default_root_state.clone()
torch.testing.assert_close(robot.data.root_state_w, default_root_state)
def test_initialization_fixed_base_single_joint(self):
"""Test initialization for fixed base articulation with a single joint."""
# Create articulation
robot_cfg = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Simple/revolute_articulation.usd"),
actuators={
"joint": ImplicitActuatorCfg(
joint_names_expr=[".*"],
effort_limit=400.0,
velocity_limit=100.0,
stiffness=0.0,
damping=10.0,
),
},
)
robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot"))
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that fixed base
self.assertTrue(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 1))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Check that the body_physx_view is deprecated
with self.assertWarns(DeprecationWarning):
robot.body_physx_view
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
def test_initialization_hand_with_tendons(self):
"""Test initialization for fixed base articulated hand with tendons."""
# Create articulation
robot_cfg = SHADOW_HAND_CFG
robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot"))
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that fixed base
self.assertTrue(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 24))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
def test_initialization_floating_base_made_fixed_base(self):
"""Test initialization for a floating-base articulation made fixed-base using schema properties."""
# Create articulation
robot_cfg: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="/World/Robot")
robot_cfg.spawn.articulation_props.fix_root_link = True
robot = Articulation(cfg=robot_cfg)
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that the base is now fixed
self.assertTrue(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 12))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Check that the body_physx_view is deprecated
with self.assertWarns(DeprecationWarning):
robot.body_physx_view
# Root state should be at the default state
robot.write_root_state_to_sim(robot.data.default_root_state.clone())
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
# check that the root is at the correct state
default_root_state = robot.data.default_root_state.clone()
torch.testing.assert_close(robot.data.root_state_w, default_root_state)
def test_initialization_fixed_base_made_floating_base(self):
"""Test initialization for fixed base made floating-base using schema properties."""
# Create articulation
robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot")
robot_cfg.spawn.articulation_props.fix_root_link = False
robot = Articulation(cfg=robot_cfg)
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertTrue(robot._is_initialized)
# Check that the base is now floating
self.assertFalse(robot.is_fixed_base)
# Check buffers that exists and have correct shapes
self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
self.assertTrue(robot.data.joint_pos.shape == (1, 9))
# Check some internal physx data for debugging
# -- joint related
self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
# -- link related
self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
# -- link names (check within articulation ordering is correct)
prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
self.assertListEqual(prim_path_body_names, robot.body_names)
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update robot
robot.update(self.dt)
# check that the root has drifted from the default state since the base is no longer fixed
default_root_state = robot.data.default_root_state.clone()
is_close = torch.any(torch.isclose(robot.data.root_state_w, default_root_state))
self.assertFalse(is_close)
def test_out_of_range_default_joint_pos(self):
"""Test that the default joint position from configuration is out of range."""
# Create articulation
robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot")
robot_cfg.init_state.joint_pos = {
"panda_joint1": 10.0,
"panda_joint[2, 4]": -20.0,
}
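# note: "panda_joint[2, 4]" is a regex character class, so it matches panda_joint2 and panda_joint4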
robot = Articulation(robot_cfg)
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertFalse(robot._is_initialized)
def test_out_of_range_default_joint_vel(self):
"""Test that the default joint velocity from configuration is out of range."""
# Create articulation
robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot")
robot_cfg.init_state.joint_vel = {
"panda_joint1": 100.0,
"panda_joint[2, 4]": -60.0,
}
robot = Articulation(robot_cfg)
# Check that boundedness of articulation is correct
self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)
# Play sim
self.sim.reset()
# Check if robot is initialized
self.assertFalse(robot._is_initialized)
def test_external_force_on_single_body(self):
"""Test application of external force on the base of the robot."""
# Robots
robot_cfg = ANYMAL_C_CFG
robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
# create handles for the robots
robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))
# Play the simulator
self.sim.reset()
# Find bodies to apply the force
body_ids, _ = robot.find_bodies("base")
# Sample a large force
external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=self.sim.device)
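# wrench layout is (fx, fy, fz, tx, ty, tz); the 1000 N force along the body-frame y-axis should topple the robots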
external_wrench_b[..., 1] = 1000.0
# Now we are ready!
for _ in range(5):
# reset root state
root_state = robot.data.default_root_state.clone()
root_state[0, :2] = torch.tensor([0.0, -0.5], device=self.sim.device)
root_state[1, :2] = torch.tensor([0.0, 0.5], device=self.sim.device)
robot.write_root_state_to_sim(root_state)
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset robot
robot.reset()
# apply force
robot.set_external_force_and_torque(
external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
)
# perform simulation
for _ in range(100):
# apply action to the robot
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
self.sim.step()
# update buffers
robot.update(self.dt)
# check condition that the robots have fallen down
self.assertTrue(robot.data.root_pos_w[0, 2].item() < 0.2)
self.assertTrue(robot.data.root_pos_w[1, 2].item() < 0.2)
def test_external_force_on_multiple_bodies(self):
"""Test application of external force on the legs of the robot."""
# Robots
robot_cfg = ANYMAL_C_CFG
robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
# create handles for the robots
robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))
# Play the simulator
self.sim.reset()
# Find bodies to apply the force
body_ids, _ = robot.find_bodies(".*_SHANK")
# Sample a large force
external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=self.sim.device)
external_wrench_b[..., 1] = 100.0
# Now we are ready!
for _ in range(5):
# reset root state
root_state = robot.data.default_root_state.clone()
root_state[0, :2] = torch.tensor([0.0, -0.5], device=self.sim.device)
root_state[1, :2] = torch.tensor([0.0, 0.5], device=self.sim.device)
robot.write_root_state_to_sim(root_state)
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
# reset robot
robot.reset()
# apply force
robot.set_external_force_and_torque(
external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
)
# perform simulation
for _ in range(100):
# apply action to the robot
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
self.sim.step()
# update buffers
robot.update(self.dt)
# check condition
# since there is a moment applied on the robot, the robot should rotate
self.assertTrue(robot.data.root_ang_vel_w[0, 2].item() > 0.1)
self.assertTrue(robot.data.root_ang_vel_w[1, 2].item() > 0.1)
def test_loading_gains_from_usd(self):
"""Test that gains are loaded from USD file if actuator model has them as None."""
# Create articulation
robot_cfg = ArticulationCfg(
prim_path="/World/Robot",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=None, damping=None)},
)
robot = Articulation(cfg=robot_cfg)
# Play sim
self.sim.reset()
# Expected gains
# -- Stiffness values
expected_stiffness = {
".*_waist.*": 20.0,
".*_upper_arm.*": 10.0,
"pelvis": 10.0,
".*_lower_arm": 2.0,
".*_thigh:0": 10.0,
".*_thigh:1": 20.0,
".*_thigh:2": 10.0,
".*_shin": 5.0,
".*_foot.*": 2.0,
}
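# resolve the regex-keyed gains into per-joint indices and values, then scatter them into a dense tensor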
indices_list, _, values_list = string_utils.resolve_matching_names_values(expected_stiffness, robot.joint_names)
expected_stiffness = torch.zeros(robot.num_instances, robot.num_joints, device=robot.device)
expected_stiffness[:, indices_list] = torch.tensor(values_list, device=robot.device)
# -- Damping values
expected_damping = {
".*_waist.*": 5.0,
".*_upper_arm.*": 5.0,
"pelvis": 5.0,
".*_lower_arm": 1.0,
".*_thigh:0": 5.0,
".*_thigh:1": 5.0,
".*_thigh:2": 5.0,
".*_shin": 0.1,
".*_foot.*": 1.0,
}
indices_list, _, values_list = string_utils.resolve_matching_names_values(expected_damping, robot.joint_names)
expected_damping = torch.zeros_like(expected_stiffness)
expected_damping[:, indices_list] = torch.tensor(values_list, device=robot.device)
# Check that gains are loaded from USD file
torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)
def test_setting_gains_from_cfg(self):
"""Test that gains are loaded from the configuration correctly.
Note: We purposefully give one argument as an int and the other as a float to check that both are handled correctly.
"""
# Create articulation
robot_cfg = ArticulationCfg(
prim_path="/World/Robot",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=10, damping=2.0)},
)
robot = Articulation(cfg=robot_cfg)
# Play sim
self.sim.reset()
# Expected gains
expected_stiffness = torch.full((robot.num_instances, robot.num_joints), 10.0, device=robot.device)
expected_damping = torch.full_like(expected_stiffness, 2.0)
# Check that gains are set from the configuration
torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)
def test_setting_gains_from_cfg_dict(self):
"""Test that gains are loaded from the configuration dictionary correctly.
Note: We purposefully give one argument as an int and the other as a float to check that both are handled correctly.
"""
# Create articulation
robot_cfg = ArticulationCfg(
prim_path="/World/Robot",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness={".*": 10}, damping={".*": 2.0})},
)
robot = Articulation(cfg=robot_cfg)
# Play sim
self.sim.reset()
# Expected gains
expected_stiffness = torch.full((robot.num_instances, robot.num_joints), 10.0, device=robot.device)
expected_damping = torch.full_like(expected_stiffness, 2.0)
# Check that gains are set from the configuration dictionary
torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)
if __name__ == "__main__":
run_tests()
| 25,497 | Python | 41.853781 | 120 | 0.619759 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/assets/check_external_force.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script checks if the external force is applied correctly on the robot.
.. code-block:: bash
# Usage to apply force on base
./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_external_force.py --body base --force 1000
# Usage to apply force on legs
./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_external_force.py --body .*_SHANK --force 100
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to apply an external force on a legged robot.")
parser.add_argument("--body", default="base", type=str, help="Name of the body to apply force on.")
parser.add_argument("--force", default=1000.0, type=float, help="Force to apply on the body.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort:skip
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# Spawn things into stage
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=1000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light/greyLight", cfg)
# Robots
robot_cfg = ANYMAL_C_CFG
robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
# create handles for the robots
robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))
# Play the simulator
sim.reset()
# Find bodies to apply the force
body_ids, body_names = robot.find_bodies(args_cli.body)
# Sample a large force
external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=sim.device)
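# the wrench layout is (fx, fy, fz, tx, ty, tz); apply the commanded force along the body-frame y-axis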
external_wrench_b[..., 1] = args_cli.force
# Now we are ready!
print("[INFO]: Setup complete...")
print("[INFO]: Applying force on the robot: ", args_cli.body, " -> ", body_names)
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 100 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset root state
root_state = robot.data.default_root_state.clone()
root_state[0, :2] = torch.tensor([0.0, -0.5], device=sim.device)
root_state[1, :2] = torch.tensor([0.0, 0.5], device=sim.device)
robot.write_root_state_to_sim(root_state)
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.reset()
# apply force
robot.set_external_force_and_torque(
external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
)
# reset command
print(">>>>>>>> Reset!")
# apply action to the robot
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
robot.update(sim_dt)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,259 | Python | 31.769231 | 116 | 0.640526 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/markers/test_visualization_markers.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher, run_tests
# launch omniverse app
config = {"headless": True}
simulation_app = AppLauncher(config).app
"""Rest everything follows."""
import torch
import unittest
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG, POSITION_GOAL_MARKER_CFG
from omni.isaac.orbit.utils.math import random_orientation
from omni.isaac.orbit.utils.timer import Timer
class TestUsdVisualizationMarkers(unittest.TestCase):
"""Test fixture for the VisualizationMarker class."""
def setUp(self):
"""Create a blank new stage for each test."""
# Simulation time-step
self.dt = 0.01
# Open a new stage
stage_utils.create_new_stage()
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="torch", device="cuda:0")
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
# close stage
stage_utils.close_stage()
# clear the simulation context
self.sim.clear_instance()
def test_instantiation(self):
"""Test that the class can be initialized properly."""
config = VisualizationMarkersCfg(
prim_path="/World/Visuals/test",
markers={
"test": sim_utils.SphereCfg(radius=1.0),
},
)
test_marker = VisualizationMarkers(config)
print(test_marker)
# check number of markers
self.assertEqual(test_marker.num_prototypes, 1)
def test_usd_marker(self):
"""Test with marker from a USD."""
# create a marker
config = FRAME_MARKER_CFG.replace(prim_path="/World/Visuals/test_frames")
test_marker = VisualizationMarkers(config)
# play the simulation
self.sim.reset()
# create a buffer
num_frames = 0
# run with randomization of poses
for count in range(1000):
# sample random poses
if count % 50 == 0:
num_frames = torch.randint(10, 1000, (1,)).item()
frame_translations = torch.randn((num_frames, 3))
frame_rotations = random_orientation(num_frames, device=self.sim.device)
# set the marker
test_marker.visualize(translations=frame_translations, orientations=frame_rotations)
# update the kit
self.sim.step()
# assert that the count is correct
self.assertEqual(test_marker.count, num_frames)
def test_usd_marker_color(self):
"""Test with marker from a USD with its color modified."""
# create a marker
config = FRAME_MARKER_CFG.copy()
config.prim_path = "/World/Visuals/test_frames"
config.markers["frame"].visual_material = sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0))
test_marker = VisualizationMarkers(config)
# play the simulation
self.sim.reset()
# run with randomization of poses
for count in range(1000):
# sample random poses
if count % 50 == 0:
num_frames = torch.randint(10, 1000, (1,)).item()
frame_translations = torch.randn((num_frames, 3))
frame_rotations = random_orientation(num_frames, device=self.sim.device)
# set the marker
test_marker.visualize(translations=frame_translations, orientations=frame_rotations)
# update the kit
self.sim.step()
def test_multiple_prototypes_marker(self):
"""Test with multiple prototypes of spheres."""
# create a marker
config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
test_marker = VisualizationMarkers(config)
# play the simulation
self.sim.reset()
# run with randomization of poses
for count in range(1000):
# sample random poses
if count % 50 == 0:
num_frames = torch.randint(100, 1000, (1,)).item()
frame_translations = torch.randn((num_frames, 3))
# randomly choose a prototype
marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
# set the marker
test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
# update the kit
self.sim.step()
def test_visualization_time_based_on_prototypes(self):
"""Test with time taken when number of prototypes is increased."""
# create a marker
config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
test_marker = VisualizationMarkers(config)
# play the simulation
self.sim.reset()
# number of frames
num_frames = 4096
# check that visibility is true
self.assertTrue(test_marker.is_visible())
# run with randomization of poses and indices
frame_translations = torch.randn((num_frames, 3))
marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
# set the marker
with Timer("Marker visualization with explicit indices") as timer:
test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
# save the time
time_with_marker_indices = timer.time_elapsed
with Timer("Marker visualization with no indices") as timer:
test_marker.visualize(translations=frame_translations)
# save the time
time_with_no_marker_indices = timer.time_elapsed
# update the kit
self.sim.step()
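# note: without explicit indices, all instances presumably reuse the default prototype,
# so the visualize call is expected to be cheaper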
# check that the time is less
self.assertLess(time_with_no_marker_indices, time_with_marker_indices)
def test_visualization_time_based_on_visibility(self):
"""Test with visibility of markers. When invisible, the visualize call should return."""
# create a marker
config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
test_marker = VisualizationMarkers(config)
# play the simulation
self.sim.reset()
# number of frames
num_frames = 4096
# check that visibility is true
self.assertTrue(test_marker.is_visible())
# run with randomization of poses and indices
frame_translations = torch.randn((num_frames, 3))
marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
# set the marker
with Timer("Marker visualization") as timer:
test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
# save the time
time_with_visualization = timer.time_elapsed
# update the kit
self.sim.step()
# make invisible
test_marker.set_visibility(False)
# check that visibility is false
self.assertFalse(test_marker.is_visible())
# run with randomization of poses and indices
frame_translations = torch.randn((num_frames, 3))
marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
# set the marker
with Timer("Marker no visualization") as timer:
test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
# save the time
time_with_no_visualization = timer.time_elapsed
# check that the time is less
self.assertLess(time_with_no_visualization, time_with_visualization)
if __name__ == "__main__":
run_tests()
| 8,095 | Python | 37.923077 | 112 | 0.631254 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/test/markers/check_markers_visibility.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script checks if the debug markers are visible from the camera.
To check if the markers are visible in different rendering modalities, you can switch between them using
the synthetic data generation tool in the Isaac Sim UI. For more information,
please check: https://www.youtube.com/watch?v=vLk-f9LWj48&ab_channel=NVIDIAOmniverse
.. code-block:: bash
# Usage
./orbit.sh -p source/extensions/omni.isaac.orbit/test/markers/check_markers_visibility.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Check if the debug markers are visible from the camera.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors import RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass
##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort:skip
@configclass
class SensorsSceneCfg(InteractiveSceneCfg):
"""Design the scene with sensors on the robot."""
# ground plane
ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
# lights
dome_light = AssetBaseCfg(
prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
)
# robot
robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
update_period=0.02,
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/defaultGroundPlane"],
)
def run_simulator(
sim: sim_utils.SimulationContext,
scene: InteractiveScene,
):
"""Run the simulator."""
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# Reset
if count % 500 == 0:
# reset counter
count = 0
# reset the scene entities
# root state
root_state = scene["robot"].data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
scene["robot"].write_root_state_to_sim(root_state)
# reset joint state to defaults
joint_pos, joint_vel = (
scene["robot"].data.default_joint_pos.clone(),
scene["robot"].data.default_joint_vel.clone(),
)
scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
scene.reset()
print("[INFO]: Resetting robot state...")
# Apply default actions to the robot
# -- generate actions/commands
targets = scene["robot"].data.default_joint_pos
# -- apply action to the robot
scene["robot"].set_joint_position_target(targets)
# -- write data to sim
scene.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
scene.update(sim_dt)
def main():
"""Main function."""
# Initialize the simulation context
sim_cfg = sim_utils.SimulationCfg(dt=0.005, substeps=1)
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# design scene
scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
scene = InteractiveScene(scene_cfg)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 4,642 | Python | 29.748344 | 106 | 0.65489 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/config/extension.toml | [package]
# Note: Semantic Versioning is used: https://semver.org/
version = "0.16.5"
# Description
title = "ORBIT framework for Robot Learning"
description="Extension providing main framework interfaces and abstractions for robot learning."
readme = "docs/README.md"
repository = "https://github.com/NVIDIA-Omniverse/Orbit"
category = "robotics"
keywords = ["kit", "robotics", "learning", "ai"]
[dependencies]
"omni.isaac.core" = {}
"omni.replicator.core" = {}
[[python.module]]
name = "omni.isaac.orbit"
| 511 | TOML | 24.599999 | 96 | 0.722114 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Package containing the core framework."""
import os
import toml
# Convenience paths to other module directories (resolved relative to this file)
ORBIT_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""
ORBIT_METADATA = toml.load(os.path.join(ORBIT_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""
# Configure the module-level variables
__version__ = ORBIT_METADATA["package"]["version"]
| 635 | Python | 30.799998 | 85 | 0.727559 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/device_base.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Base class for teleoperation interface."""
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Any
class DeviceBase(ABC):
"""An interface class for teleoperation devices."""
def __init__(self):
"""Initialize the teleoperation interface."""
pass
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
return f"{self.__class__.__name__}"
"""
Operations
"""
@abstractmethod
def reset(self):
"""Reset the internals."""
raise NotImplementedError
@abstractmethod
def add_callback(self, key: Any, func: Callable):
"""Add additional functions to bind keyboard.
Args:
key: The button to check against.
func: The function to call when key is pressed. The callback function should not
take any arguments.
"""
raise NotImplementedError
@abstractmethod
def advance(self) -> Any:
"""Provides the joystick event state.
Returns:
The processed output form the joystick.
"""
raise NotImplementedError
| 1,307 | Python | 24.153846 | 92 | 0.624331 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package providing interfaces to different teleoperation devices.
Currently, the following categories of devices are supported:
* **Keyboard**: Standard keyboard with WASD and arrow keys.
* **Spacemouse**: 3D mouse with 6 degrees of freedom.
* **Gamepad**: Gamepad with two 2D joysticks and buttons. Example: Xbox controller.
All device interfaces inherit from the :class:`DeviceBase` class, which provides a
common interface for all devices. The device interface reads the input data when
the :meth:`DeviceBase.advance` method is called. It also provides the function :meth:`DeviceBase.add_callback`
to add user-defined callback functions to be called when a particular input is pressed from
the peripheral device.
"""
from .device_base import DeviceBase
from .gamepad import Se2Gamepad, Se3Gamepad
from .keyboard import Se2Keyboard, Se3Keyboard
from .spacemouse import Se2SpaceMouse, Se3SpaceMouse
| 1,033 | Python | 40.359998 | 110 | 0.791868 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se3_spacemouse.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Spacemouse controller for SE(3) control."""
import hid
import numpy as np
import threading
import time
from collections.abc import Callable
from scipy.spatial.transform.rotation import Rotation
from ..device_base import DeviceBase
from .utils import convert_buffer
class Se3SpaceMouse(DeviceBase):
"""A space-mouse controller for sending SE(3) commands as delta poses.
This class implements a space-mouse controller to provide commands to a robotic arm with a gripper.
It uses the `HID-API`_ which interfaces with USB and Bluetooth HID-class devices across multiple platforms.
The command comprises two parts:
* delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
* gripper: a binary command to open or close the gripper.
Note:
The interface finds and uses the first supported device connected to the computer.
Currently tested for the following devices:
- SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/
.. _HID-API: https://github.com/libusb/hidapi
"""
def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8):
"""Initialize the space-mouse layer.
Args:
pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.4.
rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 0.8.
"""
# store inputs
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
# acquire device interface
self._device = hid.device()
self._find_device()
# read rotations
self._read_rotation = False
# command buffers
self._close_gripper = False
self._delta_pos = np.zeros(3) # (x, y, z)
self._delta_rot = np.zeros(3) # (roll, pitch, yaw)
# dictionary for additional callbacks
self._additional_callbacks = dict()
# run a thread for listening to device updates
self._thread = threading.Thread(target=self._run_device)
self._thread.daemon = True
self._thread.start()
def __del__(self):
"""Destructor for the class."""
self._thread.join()
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Spacemouse Controller for SE(3): {self.__class__.__name__}\n"
msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n"
msg += f"\tProduct: {self._device.get_product_string()}\n"
msg += "\t----------------------------------------------\n"
msg += "\tRight button: reset command\n"
msg += "\tLeft button: toggle gripper command (open/close)\n"
msg += "\tMove mouse laterally: move arm horizontally in x-y plane\n"
msg += "\tMove mouse vertically: move arm vertically\n"
msg += "\tTwist mouse about an axis: rotate arm about a corresponding axis"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._close_gripper = False
self._delta_pos = np.zeros(3) # (x, y, z)
self._delta_rot = np.zeros(3) # (roll, pitch, yaw)
def add_callback(self, key: str, func: Callable):
# check keys supported by callback
if key not in ["L", "R"]:
raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.")
# TODO: Improve this to allow multiple buttons on same key.
self._additional_callbacks[key] = func
def advance(self) -> tuple[np.ndarray, bool]:
"""Provides the result from spacemouse event state.
Returns:
A tuple containing the delta pose command and gripper commands.
"""
rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec()
return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper
"""
Internal helpers.
"""
def _find_device(self):
"""Find the device connected to computer."""
found = False
# implement a timeout for device search
for _ in range(5):
for device in hid.enumerate():
if device["product_string"] == "SpaceMouse Compact":
# set found flag
found = True
vendor_id = device["vendor_id"]
product_id = device["product_id"]
# connect to the device
self._device.open(vendor_id, product_id)
# check if device found
if not found:
time.sleep(1.0)
else:
break
# no device found: return false
if not found:
raise OSError("No device found by SpaceMouse. Is the device connected?")
def _run_device(self):
"""Listener thread that keeps pulling new messages."""
# keep running
while True:
# read the device data
data = self._device.read(7)
if data is not None:
# readings from 6-DoF sensor
if data[0] == 1:
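# note: the device axes are remapped to the robot convention (device x/y are swapped and z is inverted)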
self._delta_pos[1] = self.pos_sensitivity * convert_buffer(data[1], data[2])
self._delta_pos[0] = self.pos_sensitivity * convert_buffer(data[3], data[4])
self._delta_pos[2] = self.pos_sensitivity * convert_buffer(data[5], data[6]) * -1.0
elif data[0] == 2 and not self._read_rotation:
self._delta_rot[1] = self.rot_sensitivity * convert_buffer(data[1], data[2])
self._delta_rot[0] = self.rot_sensitivity * convert_buffer(data[3], data[4])
self._delta_rot[2] = self.rot_sensitivity * convert_buffer(data[5], data[6])
# readings from the side buttons
elif data[0] == 3:
# press left button
if data[1] == 1:
# close gripper
self._close_gripper = not self._close_gripper
# additional callbacks
if "L" in self._additional_callbacks:
self._additional_callbacks["L"]
# right button is for reset
if data[1] == 2:
# reset layer
self.reset()
# additional callbacks
if "R" in self._additional_callbacks:
self._additional_callbacks["R"]
if data[1] == 3:
self._read_rotation = not self._read_rotation
| 6,885 | Python | 39.034883 | 115 | 0.568482 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Spacemouse device for SE(2) and SE(3) control."""
from .se2_spacemouse import Se2SpaceMouse
from .se3_spacemouse import Se3SpaceMouse
| 261 | Python | 25.199997 | 56 | 0.758621 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/utils.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Helper functions for SpaceMouse."""
# MIT License
#
# Copyright (c) 2022 Stanford Vision and Learning Lab and UT Robot Perception and Learning Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def convert_buffer(b1, b2):
"""Converts raw SpaceMouse readings to commands.
Args:
b1: 8-bit byte
b2: 8-bit byte
Returns:
Scaled value from Space-mouse message
"""
return _scale_to_control(_to_int16(b1, b2))
"""
Private methods.
"""
def _to_int16(y1, y2):
"""Convert two 8 bit bytes to a signed 16 bit integer.
Args:
y1: 8-bit byte
y2: 8-bit byte
Returns:
16-bit integer
"""
x = (y1) | (y2 << 8)
if x >= 32768:
x = -(65536 - x)
return x
def _scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
"""Normalize raw HID readings to target range.
Args:
x: Raw reading from HID
axis_scale: (Inverted) scaling factor for mapping raw input value
min_v: Minimum limit after scaling
max_v: Maximum limit after scaling
Returns:
Clipped, scaled input from HID
"""
x = x / axis_scale
return min(max(x, min_v), max_v)
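# Example (with the default axis_scale of 350):
#   _scale_to_control(350)  -> 1.0
#   _scale_to_control(-700) -> -1.0 (clipped from -2.0)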
| 2,326 | Python | 28.455696 | 94 | 0.686586 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se2_spacemouse.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Spacemouse controller for SE(2) control."""
import hid
import numpy as np
import threading
import time
from collections.abc import Callable
from ..device_base import DeviceBase
from .utils import convert_buffer
class Se2SpaceMouse(DeviceBase):
r"""A space-mouse controller for sending SE(2) commands as delta poses.
This class implements a space-mouse controller to provide commands to mobile base.
It uses the `HID-API`_ which interfaces with USD and Bluetooth HID-class devices across multiple platforms.
The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`.
Note:
The interface finds and uses the first supported device connected to the computer.
Currently tested for the following devices:
- SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/
.. _HID-API: https://github.com/libusb/hidapi
"""
def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0):
"""Initialize the spacemouse layer.
Args:
v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8.
v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4.
omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0.
"""
# store inputs
self.v_x_sensitivity = v_x_sensitivity
self.v_y_sensitivity = v_y_sensitivity
self.omega_z_sensitivity = omega_z_sensitivity
# acquire device interface
self._device = hid.device()
self._find_device()
# command buffers
self._base_command = np.zeros(3)
# dictionary for additional callbacks
self._additional_callbacks = dict()
# run a thread for listening to device updates
self._thread = threading.Thread(target=self._run_device)
self._thread.daemon = True
self._thread.start()
def __del__(self):
"""Destructor for the class."""
self._thread.join()
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Spacemouse Controller for SE(2): {self.__class__.__name__}\n"
msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n"
msg += f"\tProduct: {self._device.get_product_string()}\n"
msg += "\t----------------------------------------------\n"
msg += "\tRight button: reset command\n"
msg += "\tMove mouse laterally: move base horizontally in x-y plane\n"
msg += "\tTwist mouse about z-axis: yaw base about a corresponding axis"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._base_command.fill(0.0)
def add_callback(self, key: str, func: Callable):
# check keys supported by callback
if key not in ["L", "R"]:
raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.")
# TODO: Improve this to allow multiple buttons on same key.
self._additional_callbacks[key] = func
def advance(self) -> np.ndarray:
"""Provides the result from spacemouse event state.
Returns:
A 3D array containing the linear (x,y) and angular velocity (z).
"""
return self._base_command
"""
Internal helpers.
"""
def _find_device(self):
"""Find the device connected to computer."""
found = False
# implement a timeout for device search
for _ in range(5):
for device in hid.enumerate():
if device["product_string"] == "SpaceMouse Compact":
# set found flag
found = True
vendor_id = device["vendor_id"]
product_id = device["product_id"]
# connect to the device
self._device.open(vendor_id, product_id)
# check if device found
if not found:
time.sleep(1.0)
else:
break
# no device found: return false
if not found:
raise OSError("No device found by SpaceMouse. Is the device connected?")
def _run_device(self):
"""Listener thread that keeps pulling new messages."""
# keep running
while True:
# read the device data
data = self._device.read(13)
if data is not None:
# readings from 6-DoF sensor
if data[0] == 1:
# along y-axis
self._base_command[1] = self.v_y_sensitivity * convert_buffer(data[1], data[2])
# along x-axis
self._base_command[0] = self.v_x_sensitivity * convert_buffer(data[3], data[4])
elif data[0] == 2:
# along z-axis
self._base_command[2] = self.omega_z_sensitivity * convert_buffer(data[3], data[4])
# readings from the side buttons
elif data[0] == 3:
# press left button
if data[1] == 1:
# additional callbacks
if "L" in self._additional_callbacks:
self._additional_callbacks["L"]
# right button is for reset
if data[1] == 2:
# reset layer
self.reset()
# additional callbacks
if "R" in self._additional_callbacks:
self._additional_callbacks["R"]
| 5,877 | Python | 36.92258 | 117 | 0.564914 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se2_gamepad.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Gamepad controller for SE(2) control."""
import numpy as np
import weakref
from collections.abc import Callable
import carb
import omni
from ..device_base import DeviceBase
class Se2Gamepad(DeviceBase):
r"""A gamepad controller for sending SE(2) commands as velocity commands.
This class is designed to provide a gamepad controller for a mobile base (such as quadrupeds).
It uses the Omniverse gamepad interface to listen to gamepad events and map them to the robot's
task-space commands.
The command comprises the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`.
Key bindings:
====================== ========================= ========================
Command Key (+ve axis) Key (-ve axis)
====================== ========================= ========================
Move along x-axis left stick up left stick down
Move along y-axis left stick right left stick left
Rotate along z-axis right stick right right stick left
====================== ========================= ========================
.. seealso::
The official documentation for the gamepad interface: `Carb Gamepad Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__.
"""
def __init__(
self,
v_x_sensitivity: float = 1.0,
v_y_sensitivity: float = 1.0,
omega_z_sensitivity: float = 1.0,
dead_zone: float = 0.01,
):
"""Initialize the gamepad layer.
Args:
v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 1.0.
v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 1.0.
omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0.
dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than
this value will be ignored. Defaults to 0.01.
"""
# turn off simulator gamepad control
carb_settings_iface = carb.settings.get_settings()
carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False)
# store inputs
self.v_x_sensitivity = v_x_sensitivity
self.v_y_sensitivity = v_y_sensitivity
self.omega_z_sensitivity = omega_z_sensitivity
self.dead_zone = dead_zone
# acquire omniverse interfaces
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._gamepad = self._appwindow.get_gamepad(0)
# note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called
self._gamepad_sub = self._input.subscribe_to_gamepad_events(
self._gamepad,
lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args),
)
# bindings for gamepad to command
self._create_key_bindings()
# command buffers
# When using the gamepad, two values are provided for each axis.
        # For example: when the left stick is moved down, there are two events: `left_stick_down = 0.8`
# and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0,
# which is not the desired behavior. Therefore, we save both the values into the buffer and use
# the maximum value.
# (positive, negative), (x, y, yaw)
self._base_command_raw = np.zeros([2, 3])
# dictionary for additional callbacks
self._additional_callbacks = dict()
def __del__(self):
"""Unsubscribe from gamepad events."""
self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub)
self._gamepad_sub = None
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Gamepad Controller for SE(2): {self.__class__.__name__}\n"
msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n"
msg += "\t----------------------------------------------\n"
msg += "\tMove in X-Y plane: left stick\n"
msg += "\tRotate in Z-axis: right stick\n"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._base_command_raw.fill(0.0)
def add_callback(self, key: carb.input.GamepadInput, func: Callable):
"""Add additional functions to bind gamepad.
        A list of available gamepad keys is present in the
`carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.GamepadInput>`__.
Args:
key: The gamepad button to check against.
func: The function to call when key is pressed. The callback function should not
take any arguments.
"""
self._additional_callbacks[key] = func
def advance(self) -> np.ndarray:
"""Provides the result from gamepad event state.
Returns:
A 3D array containing the linear (x,y) and angular velocity (z).
"""
return self._resolve_command_buffer(self._base_command_raw)
"""
Internal helpers.
"""
def _on_gamepad_event(self, event: carb.input.GamepadEvent, *args, **kwargs):
"""Subscriber callback to when kit is updated.
Reference:
https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput
"""
# check if the event is a button press
cur_val = event.value
if abs(cur_val) < self.dead_zone:
cur_val = 0
# -- left and right stick
if event.input in self._INPUT_STICK_VALUE_MAPPING:
direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input]
# change the value only if the stick is moved (soft press)
self._base_command_raw[direction, axis] = value * cur_val
# additional callbacks
if event.input in self._additional_callbacks:
self._additional_callbacks[event.input]()
# since no error, we are fine :)
return True
def _create_key_bindings(self):
"""Creates default key binding."""
self._INPUT_STICK_VALUE_MAPPING = {
# forward command
carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.v_x_sensitivity),
# backward command
carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.v_x_sensitivity),
# right command
carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.v_y_sensitivity),
# left command
carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.v_y_sensitivity),
# yaw command (positive)
carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 2, self.omega_z_sensitivity),
# yaw command (negative)
carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 2, self.omega_z_sensitivity),
}
def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray:
"""Resolves the command buffer.
Args:
raw_command: The raw command from the gamepad. Shape is (2, 3)
This is a 2D array since gamepad dpad/stick returns two values corresponding to
                the positive and negative direction. The first index is the direction (0: positive, 1: negative)
                and the second index is the axis; the stored values are absolute magnitudes.
Returns:
Resolved command. Shape is (3,)
"""
        # compare the positive and negative values to decide the sign of the command
        # if the positive value is larger, the sign is positive (i.e. False, 0)
        # if the negative value is larger, the sign is negative (i.e. True, 1)
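        # concrete illustration (hypothetical readings, chosen only for this comment):
        #   raw_command = [[0.8, 0.0, 0.1],
        #                  [0.0, 0.5, 0.3]]
        #   -> command_sign = [False, True, True], per-axis max = [0.8, 0.5, 0.3]
        #   -> resolved command = [0.8, -0.5, -0.3]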
command_sign = raw_command[1, :] > raw_command[0, :]
# extract the command value
command = raw_command.max(axis=0)
# apply the sign
# if the sign is positive, the value is already positive.
# if the sign is negative, the value is negative after applying the sign.
command[command_sign] *= -1
return command
| 8,527 | Python | 41.64 | 192 | 0.604433 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se3_gamepad.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Gamepad controller for SE(3) control."""
import numpy as np
import weakref
from collections.abc import Callable
from scipy.spatial.transform.rotation import Rotation
import carb
import omni
from ..device_base import DeviceBase
class Se3Gamepad(DeviceBase):
"""A gamepad controller for sending SE(3) commands as delta poses and binary command (open/close).
This class is designed to provide a gamepad controller for a robotic arm with a gripper.
It uses the gamepad interface to listen to gamepad events and map them to the robot's
task-space commands.
    The command consists of two parts:
* delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
* gripper: a binary command to open or close the gripper.
Stick and Button bindings:
============================ ========================= =========================
Description Stick/Button (+ve axis) Stick/Button (-ve axis)
============================ ========================= =========================
    Toggle gripper (open/close)  X Button                  X Button
Move along x-axis Left Stick Up Left Stick Down
    Move along y-axis            Left Stick Right          Left Stick Left
Move along z-axis Right Stick Up Right Stick Down
Rotate along x-axis D-Pad Left D-Pad Right
Rotate along y-axis D-Pad Down D-Pad Up
    Rotate along z-axis          Right Stick Right         Right Stick Left
============================ ========================= =========================
.. seealso::
The official documentation for the gamepad interface: `Carb Gamepad Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__.
"""
def __init__(self, pos_sensitivity: float = 1.0, rot_sensitivity: float = 1.6, dead_zone: float = 0.01):
"""Initialize the gamepad layer.
Args:
pos_sensitivity: Magnitude of input position command scaling. Defaults to 1.0.
            rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 1.6.
dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than
this value will be ignored. Defaults to 0.01.
"""
# turn off simulator gamepad control
carb_settings_iface = carb.settings.get_settings()
carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False)
# store inputs
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
self.dead_zone = dead_zone
# acquire omniverse interfaces
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._gamepad = self._appwindow.get_gamepad(0)
# note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called
self._gamepad_sub = self._input.subscribe_to_gamepad_events(
self._gamepad,
lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args),
)
# bindings for gamepad to command
self._create_key_bindings()
# command buffers
self._close_gripper = False
# When using the gamepad, two values are provided for each axis.
        # For example: when the left stick is moved down, there are two events: `left_stick_down = 0.8`
# and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0,
# which is not the desired behavior. Therefore, we save both the values into the buffer and use
# the maximum value.
# (positive, negative), (x, y, z, roll, pitch, yaw)
self._delta_pose_raw = np.zeros([2, 6])
# dictionary for additional callbacks
self._additional_callbacks = dict()
def __del__(self):
"""Unsubscribe from gamepad events."""
self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub)
self._gamepad_sub = None
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Gamepad Controller for SE(3): {self.__class__.__name__}\n"
msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n"
msg += "\t----------------------------------------------\n"
msg += "\tToggle gripper (open/close): X\n"
msg += "\tMove arm along x-axis: Left Stick Up/Down\n"
msg += "\tMove arm along y-axis: Left Stick Left/Right\n"
msg += "\tMove arm along z-axis: Right Stick Up/Down\n"
msg += "\tRotate arm along x-axis: D-Pad Right/Left\n"
msg += "\tRotate arm along y-axis: D-Pad Down/Up\n"
msg += "\tRotate arm along z-axis: Right Stick Left/Right\n"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._close_gripper = False
self._delta_pose_raw.fill(0.0)
def add_callback(self, key: carb.input.GamepadInput, func: Callable):
"""Add additional functions to bind gamepad.
        A list of available gamepad keys is present in the
`carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput>`__.
Args:
key: The gamepad button to check against.
func: The function to call when key is pressed. The callback function should not
take any arguments.
"""
self._additional_callbacks[key] = func
def advance(self) -> tuple[np.ndarray, bool]:
"""Provides the result from gamepad event state.
Returns:
A tuple containing the delta pose command and gripper commands.
"""
# -- resolve position command
delta_pos = self._resolve_command_buffer(self._delta_pose_raw[:, :3])
# -- resolve rotation command
delta_rot = self._resolve_command_buffer(self._delta_pose_raw[:, 3:])
# -- convert to rotation vector
rot_vec = Rotation.from_euler("XYZ", delta_rot).as_rotvec()
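        # note: the rotation vector is an axis-angle encoding; for a rotation about a single
        # axis it reduces to that Euler component, e.g. a pure yaw of pi/2 maps to (0, 0, pi/2)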
# return the command and gripper state
return np.concatenate([delta_pos, rot_vec]), self._close_gripper
"""
Internal helpers.
"""
def _on_gamepad_event(self, event, *args, **kwargs):
"""Subscriber callback to when kit is updated.
Reference:
https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.Gamepad
"""
# check if the event is a button press
cur_val = event.value
if abs(cur_val) < self.dead_zone:
cur_val = 0
# -- button
if event.input == carb.input.GamepadInput.X:
# toggle gripper based on the button pressed
if cur_val > 0.5:
self._close_gripper = not self._close_gripper
# -- left and right stick
if event.input in self._INPUT_STICK_VALUE_MAPPING:
direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input]
# change the value only if the stick is moved (soft press)
self._delta_pose_raw[direction, axis] = value * cur_val
# -- dpad (4 arrow buttons on the console)
if event.input in self._INPUT_DPAD_VALUE_MAPPING:
direction, axis, value = self._INPUT_DPAD_VALUE_MAPPING[event.input]
# change the value only if button is pressed on the DPAD
if cur_val > 0.5:
self._delta_pose_raw[direction, axis] = value
self._delta_pose_raw[1 - direction, axis] = 0
else:
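                # release (or a soft press below the 0.5 threshold): clear both directions for this axis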
self._delta_pose_raw[:, axis] = 0
# additional callbacks
if event.input in self._additional_callbacks:
self._additional_callbacks[event.input]()
# since no error, we are fine :)
return True
def _create_key_bindings(self):
"""Creates default key binding."""
# map gamepad input to the element in self._delta_pose_raw
# the first index is the direction (0: positive, 1: negative)
# the second index is the axis (0: x, 1: y, 2: z, 3: roll, 4: pitch, 5: yaw)
# the third index is the sensitivity of the command
self._INPUT_STICK_VALUE_MAPPING = {
# forward command
carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.pos_sensitivity),
# backward command
carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.pos_sensitivity),
# right command
carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.pos_sensitivity),
# left command
carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.pos_sensitivity),
# upward command
carb.input.GamepadInput.RIGHT_STICK_UP: (0, 2, self.pos_sensitivity),
# downward command
carb.input.GamepadInput.RIGHT_STICK_DOWN: (1, 2, self.pos_sensitivity),
# yaw command (positive)
carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 5, self.rot_sensitivity),
# yaw command (negative)
carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 5, self.rot_sensitivity),
}
self._INPUT_DPAD_VALUE_MAPPING = {
            # pitch command (negative)
            carb.input.GamepadInput.DPAD_UP: (1, 4, self.rot_sensitivity * 0.8),
            # pitch command (positive)
            carb.input.GamepadInput.DPAD_DOWN: (0, 4, self.rot_sensitivity * 0.8),
            # roll command (negative)
            carb.input.GamepadInput.DPAD_RIGHT: (1, 3, self.rot_sensitivity * 0.8),
            # roll command (positive)
            carb.input.GamepadInput.DPAD_LEFT: (0, 3, self.rot_sensitivity * 0.8),
}
def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray:
"""Resolves the command buffer.
Args:
raw_command: The raw command from the gamepad. Shape is (2, 3)
This is a 2D array since gamepad dpad/stick returns two values corresponding to
                the positive and negative direction. The first index is the direction (0: positive, 1: negative)
                and the second index is the axis; the stored values are absolute magnitudes.
Returns:
Resolved command. Shape is (3,)
"""
        # compare the positive and negative values to decide the sign of the command
        # if the positive value is larger, the sign is positive (i.e. False, 0)
        # if the negative value is larger, the sign is negative (i.e. True, 1)
delta_command_sign = raw_command[1, :] > raw_command[0, :]
# extract the command value
delta_command = raw_command.max(axis=0)
# apply the sign
# if the sign is positive, the value is already positive.
# if the sign is negative, the value is negative after applying the sign.
delta_command[delta_command_sign] *= -1
return delta_command
| 11,390 | Python | 45.493877 | 192 | 0.599034 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Gamepad device for SE(2) and SE(3) control."""
from .se2_gamepad import Se2Gamepad
from .se3_gamepad import Se3Gamepad
| 246 | Python | 23.699998 | 56 | 0.743902 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Keyboard device for SE(2) and SE(3) control."""
from .se2_keyboard import Se2Keyboard
from .se3_keyboard import Se3Keyboard
| 251 | Python | 24.199998 | 56 | 0.749004 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se2_keyboard.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Keyboard controller for SE(2) control."""
import numpy as np
import weakref
from collections.abc import Callable
import carb
import omni
from ..device_base import DeviceBase
class Se2Keyboard(DeviceBase):
r"""A keyboard controller for sending SE(2) commands as velocity commands.
    This class is designed to provide a keyboard controller for a mobile base (such as quadrupeds).
    It uses the Omniverse keyboard interface to listen to keyboard events and map them to the robot's
    task-space commands.
    The command consists of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`.
Key bindings:
====================== ========================= ========================
Command Key (+ve axis) Key (-ve axis)
====================== ========================= ========================
Move along x-axis Numpad 8 / Arrow Up Numpad 2 / Arrow Down
    Move along y-axis      Numpad 4 / Arrow Left     Numpad 6 / Arrow Right
    Rotate along z-axis    Numpad 7 / X              Numpad 9 / Z
====================== ========================= ========================
.. seealso::
The official documentation for the keyboard interface: `Carb Keyboard Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__.
"""
def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0):
"""Initialize the keyboard layer.
Args:
v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8.
v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4.
omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0.
"""
# store inputs
self.v_x_sensitivity = v_x_sensitivity
self.v_y_sensitivity = v_y_sensitivity
self.omega_z_sensitivity = omega_z_sensitivity
# acquire omniverse interfaces
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
# note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called
self._keyboard_sub = self._input.subscribe_to_keyboard_events(
self._keyboard,
lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args),
)
# bindings for keyboard to command
self._create_key_bindings()
# command buffers
self._base_command = np.zeros(3)
# dictionary for additional callbacks
self._additional_callbacks = dict()
def __del__(self):
"""Release the keyboard interface."""
self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub)
self._keyboard_sub = None
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Keyboard Controller for SE(2): {self.__class__.__name__}\n"
msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n"
msg += "\t----------------------------------------------\n"
msg += "\tReset all commands: L\n"
msg += "\tMove forward (along x-axis): Numpad 8 / Arrow Up\n"
msg += "\tMove backward (along x-axis): Numpad 2 / Arrow Down\n"
msg += "\tMove right (along y-axis): Numpad 4 / Arrow Right\n"
msg += "\tMove left (along y-axis): Numpad 6 / Arrow Left\n"
msg += "\tYaw positively (along z-axis): Numpad 7 / X\n"
msg += "\tYaw negatively (along z-axis): Numpad 9 / Y"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._base_command.fill(0.0)
def add_callback(self, key: str, func: Callable):
"""Add additional functions to bind keyboard.
        A list of available keys is present in the
`carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__.
Args:
key: The keyboard button to check against.
func: The function to call when key is pressed. The callback function should not
take any arguments.
"""
self._additional_callbacks[key] = func
def advance(self) -> np.ndarray:
"""Provides the result from keyboard event state.
Returns:
3D array containing the linear (x,y) and angular velocity (z).
"""
return self._base_command
"""
Internal helpers.
"""
def _on_keyboard_event(self, event, *args, **kwargs):
"""Subscriber callback to when kit is updated.
Reference:
https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput
"""
# apply the command when pressed
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name == "L":
self.reset()
elif event.input.name in self._INPUT_KEY_MAPPING:
self._base_command += self._INPUT_KEY_MAPPING[event.input.name]
# remove the command when un-pressed
if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
if event.input.name in self._INPUT_KEY_MAPPING:
self._base_command -= self._INPUT_KEY_MAPPING[event.input.name]
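        # note: press events add the key's command vector and release events subtract it,
        # so holding several keys at once sums their contributions into the base command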
# additional callbacks
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._additional_callbacks:
self._additional_callbacks[event.input.name]()
# since no error, we are fine :)
return True
def _create_key_bindings(self):
"""Creates default key binding."""
self._INPUT_KEY_MAPPING = {
# forward command
"NUMPAD_8": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity,
"UP": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity,
# back command
"NUMPAD_2": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity,
"DOWN": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity,
            # left command (+y in the base frame with x-forward, z-up)
            "NUMPAD_4": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity,
            "LEFT": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity,
            # right command (-y)
            "NUMPAD_6": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity,
            "RIGHT": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity,
# yaw command (positive)
"NUMPAD_7": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity,
"X": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity,
# yaw command (negative)
"NUMPAD_9": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity,
"Z": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity,
}
| 7,318 | Python | 42.565476 | 195 | 0.583629 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se3_keyboard.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Keyboard controller for SE(3) control."""
import numpy as np
import weakref
from collections.abc import Callable
from scipy.spatial.transform.rotation import Rotation
import carb
import omni
from ..device_base import DeviceBase
class Se3Keyboard(DeviceBase):
"""A keyboard controller for sending SE(3) commands as delta poses and binary command (open/close).
This class is designed to provide a keyboard controller for a robotic arm with a gripper.
    It uses the Omniverse keyboard interface to listen to keyboard events and map them to the robot's
    task-space commands.
    The command consists of two parts:
* delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
* gripper: a binary command to open or close the gripper.
Key bindings:
============================== ================= =================
Description Key (+ve axis) Key (-ve axis)
============================== ================= =================
Toggle gripper (open/close) K
Move along x-axis W S
Move along y-axis A D
Move along z-axis Q E
Rotate along x-axis Z X
Rotate along y-axis T G
Rotate along z-axis C V
============================== ================= =================
.. seealso::
The official documentation for the keyboard interface: `Carb Keyboard Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__.
"""
def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8):
"""Initialize the keyboard layer.
Args:
            pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.4.
            rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 0.8.
"""
# store inputs
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
# acquire omniverse interfaces
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
# note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called.
self._keyboard_sub = self._input.subscribe_to_keyboard_events(
self._keyboard,
lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args),
)
# bindings for keyboard to command
self._create_key_bindings()
# command buffers
self._close_gripper = False
self._delta_pos = np.zeros(3) # (x, y, z)
self._delta_rot = np.zeros(3) # (roll, pitch, yaw)
# dictionary for additional callbacks
self._additional_callbacks = dict()
def __del__(self):
"""Release the keyboard interface."""
self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub)
self._keyboard_sub = None
def __str__(self) -> str:
"""Returns: A string containing the information of joystick."""
msg = f"Keyboard Controller for SE(3): {self.__class__.__name__}\n"
msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n"
msg += "\t----------------------------------------------\n"
msg += "\tToggle gripper (open/close): K\n"
msg += "\tMove arm along x-axis: W/S\n"
msg += "\tMove arm along y-axis: A/D\n"
msg += "\tMove arm along z-axis: Q/E\n"
msg += "\tRotate arm along x-axis: Z/X\n"
msg += "\tRotate arm along y-axis: T/G\n"
msg += "\tRotate arm along z-axis: C/V"
return msg
"""
Operations
"""
def reset(self):
# default flags
self._close_gripper = False
self._delta_pos = np.zeros(3) # (x, y, z)
self._delta_rot = np.zeros(3) # (roll, pitch, yaw)
def add_callback(self, key: str, func: Callable):
"""Add additional functions to bind keyboard.
        A list of available keys is present in the
`carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__.
Args:
key: The keyboard button to check against.
func: The function to call when key is pressed. The callback function should not
take any arguments.
"""
self._additional_callbacks[key] = func
def advance(self) -> tuple[np.ndarray, bool]:
"""Provides the result from keyboard event state.
Returns:
A tuple containing the delta pose command and gripper commands.
"""
# convert to rotation vector
rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec()
# return the command and gripper state
return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper
"""
Internal helpers.
"""
def _on_keyboard_event(self, event, *args, **kwargs):
"""Subscriber callback to when kit is updated.
Reference:
https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput
"""
# apply the command when pressed
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name == "L":
self.reset()
if event.input.name == "K":
self._close_gripper = not self._close_gripper
elif event.input.name in ["W", "S", "A", "D", "Q", "E"]:
self._delta_pos += self._INPUT_KEY_MAPPING[event.input.name]
elif event.input.name in ["Z", "X", "T", "G", "C", "V"]:
self._delta_rot += self._INPUT_KEY_MAPPING[event.input.name]
# remove the command when un-pressed
if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
if event.input.name in ["W", "S", "A", "D", "Q", "E"]:
self._delta_pos -= self._INPUT_KEY_MAPPING[event.input.name]
elif event.input.name in ["Z", "X", "T", "G", "C", "V"]:
self._delta_rot -= self._INPUT_KEY_MAPPING[event.input.name]
# additional callbacks
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._additional_callbacks:
self._additional_callbacks[event.input.name]()
# since no error, we are fine :)
return True
def _create_key_bindings(self):
"""Creates default key binding."""
self._INPUT_KEY_MAPPING = {
# toggle: gripper command
"K": True,
# x-axis (forward)
"W": np.asarray([1.0, 0.0, 0.0]) * self.pos_sensitivity,
"S": np.asarray([-1.0, 0.0, 0.0]) * self.pos_sensitivity,
# y-axis (right-left)
"D": np.asarray([0.0, 1.0, 0.0]) * self.pos_sensitivity,
"A": np.asarray([0.0, -1.0, 0.0]) * self.pos_sensitivity,
# z-axis (up-down)
"Q": np.asarray([0.0, 0.0, 1.0]) * self.pos_sensitivity,
"E": np.asarray([0.0, 0.0, -1.0]) * self.pos_sensitivity,
# roll (around x-axis)
"Z": np.asarray([1.0, 0.0, 0.0]) * self.rot_sensitivity,
"X": np.asarray([-1.0, 0.0, 0.0]) * self.rot_sensitivity,
# pitch (around y-axis)
"T": np.asarray([0.0, 1.0, 0.0]) * self.rot_sensitivity,
"G": np.asarray([0.0, -1.0, 0.0]) * self.rot_sensitivity,
# yaw (around z-axis)
"C": np.asarray([0.0, 0.0, 1.0]) * self.rot_sensitivity,
"V": np.asarray([0.0, 0.0, -1.0]) * self.rot_sensitivity,
}
| 8,140 | Python | 42.074074 | 195 | 0.561057 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
from .sensor_base import SensorBase
@configclass
class SensorBaseCfg:
"""Configuration parameters for a sensor."""
class_type: type[SensorBase] = MISSING
"""The associated sensor class.
The class should inherit from :class:`omni.isaac.orbit.sensors.sensor_base.SensorBase`.
"""
prim_path: str = MISSING
"""Prim path (or expression) to the sensor.
.. note::
The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which
will be replaced with the environment namespace.
Example: ``{ENV_REGEX_NS}/Robot/sensor`` will be replaced with ``/World/envs/env_.*/Robot/sensor``.
"""
update_period: float = 0.0
"""Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step)."""
history_length: int = 0
"""Number of past frames to store in the sensor buffers. Defaults to 0, which means that only
the current data is stored (no history)."""
debug_vis: bool = False
"""Whether to visualize the sensor. Defaults to False."""
| 1,261 | Python | 28.348837 | 107 | 0.689136 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package containing various sensor classes implementations.
This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both
USD-based and custom sensors:
* **USD-prim sensors**: Available in Omniverse and require creating a USD prim for them.
For instance, RTX ray tracing camera and lidar sensors.
* **USD-schema sensors**: Available in Omniverse and require creating a USD schema on an existing prim.
For instance, contact sensors and frame transformers.
* **Custom sensors**: Implemented in Python and do not require creating any USD prim or schema.
For instance, warp-based ray-casters.
Due to the above categorization, the prim paths passed to the sensor's configuration class
are interpreted differently based on the sensor type. The following table summarizes the
interpretation of the prim paths for different sensor types:
+---------------------+---------------------------+---------------------------------------------------------------+
| Sensor Type | Example Prim Path | Pre-check |
+=====================+===========================+===============================================================+
| Camera | /World/robot/base/camera | Leaf is available, and it will spawn a USD camera |
+---------------------+---------------------------+---------------------------------------------------------------+
| Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists |
+---------------------+---------------------------+---------------------------------------------------------------+
| Ray Caster | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) |
+---------------------+---------------------------+---------------------------------------------------------------+
| Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) |
+---------------------+---------------------------+---------------------------------------------------------------+
"""
from .camera import * # noqa: F401, F403
from .contact_sensor import * # noqa: F401, F403
from .frame_transformer import * # noqa: F401
from .ray_caster import * # noqa: F401, F403
from .sensor_base import SensorBase # noqa: F401
from .sensor_base_cfg import SensorBaseCfg # noqa: F401
| 2,577 | Python | 60.380951 | 115 | 0.510671 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Base class for sensors.
This class defines an interface for sensors similar to how the :class:`omni.isaac.orbit.assets.AssetBase` class works.
Each sensor class should inherit from this class and implement the abstract methods.
"""
from __future__ import annotations
import inspect
import torch
import weakref
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any
import omni.kit.app
import omni.timeline
import omni.isaac.orbit.sim as sim_utils
if TYPE_CHECKING:
from .sensor_base_cfg import SensorBaseCfg
class SensorBase(ABC):
"""The base class for implementing a sensor.
The implementation is based on lazy evaluation. The sensor data is only updated when the user
tries accessing the data through the :attr:`data` property or sets ``force_compute=True`` in
the :meth:`update` method. This is done to avoid unnecessary computation when the sensor data
is not used.
The sensor is updated at the specified update period. If the update period is zero, then the
sensor is updated at every simulation step.
"""
def __init__(self, cfg: SensorBaseCfg):
"""Initialize the sensor class.
Args:
cfg: The configuration parameters for the sensor.
"""
# check that config is valid
if cfg.history_length < 0:
raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}")
# store inputs
self.cfg = cfg
# flag for whether the sensor is initialized
self._is_initialized = False
# flag for whether the sensor is in visualization mode
self._is_visualizing = False
# note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called.
# add callbacks for stage play/stop
# The order is set to 10 which is arbitrary but should be lower priority than the default order of 0
timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type(
int(omni.timeline.TimelineEventType.PLAY),
lambda event, obj=weakref.proxy(self): obj._initialize_callback(event),
order=10,
)
self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type(
int(omni.timeline.TimelineEventType.STOP),
lambda event, obj=weakref.proxy(self): obj._invalidate_initialize_callback(event),
order=10,
)
# add handle for debug visualization (this is set to a valid handle inside set_debug_vis)
self._debug_vis_handle = None
# set initial state of debug visualization
self.set_debug_vis(self.cfg.debug_vis)
def __del__(self):
"""Unsubscribe from the callbacks."""
# clear physics events handles
if self._initialize_handle:
self._initialize_handle.unsubscribe()
self._initialize_handle = None
if self._invalidate_initialize_handle:
self._invalidate_initialize_handle.unsubscribe()
self._invalidate_initialize_handle = None
# clear debug visualization
if self._debug_vis_handle:
self._debug_vis_handle.unsubscribe()
self._debug_vis_handle = None
"""
Properties
"""
@property
def num_instances(self) -> int:
"""Number of instances of the sensor.
This is equal to the number of sensors per environment multiplied by the number of environments.
"""
return self._num_envs
@property
def device(self) -> str:
"""Memory device for computation."""
return self._device
@property
@abstractmethod
def data(self) -> Any:
"""Data from the sensor.
This property is only updated when the user tries to access the data. This is done to avoid
unnecessary computation when the sensor data is not used.
For updating the sensor when this property is accessed, you can use the following
code snippet in your sensor implementation:
.. code-block:: python
# update sensors if needed
self._update_outdated_buffers()
# return the data (where `_data` is the data for the sensor)
return self._data
"""
raise NotImplementedError
@property
def has_debug_vis_implementation(self) -> bool:
"""Whether the sensor has a debug visualization implemented."""
# check if function raises NotImplementedError
source_code = inspect.getsource(self._set_debug_vis_impl)
return "NotImplementedError" not in source_code
"""
Operations
"""
def set_debug_vis(self, debug_vis: bool) -> bool:
"""Sets whether to visualize the sensor data.
Args:
debug_vis: Whether to visualize the sensor data.
Returns:
Whether the debug visualization was successfully set. False if the sensor
does not support debug visualization.
"""
# check if debug visualization is supported
if not self.has_debug_vis_implementation:
return False
# toggle debug visualization objects
self._set_debug_vis_impl(debug_vis)
# toggle debug visualization flag
self._is_visualizing = debug_vis
# toggle debug visualization handles
if debug_vis:
# create a subscriber for the post update event if it doesn't exist
if self._debug_vis_handle is None:
app_interface = omni.kit.app.get_app_interface()
self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop(
lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event)
)
else:
# remove the subscriber if it exists
if self._debug_vis_handle is not None:
self._debug_vis_handle.unsubscribe()
self._debug_vis_handle = None
# return success
return True
def reset(self, env_ids: Sequence[int] | None = None):
"""Resets the sensor internals.
Args:
env_ids: The sensor ids to reset. Defaults to None.
"""
# Resolve sensor ids
if env_ids is None:
env_ids = slice(None)
# Reset the timestamp for the sensors
self._timestamp[env_ids] = 0.0
self._timestamp_last_update[env_ids] = 0.0
# Set all reset sensors to outdated so that they are updated when data is called the next time.
self._is_outdated[env_ids] = True
def update(self, dt: float, force_recompute: bool = False):
# Update the timestamp for the sensors
self._timestamp += dt
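        # note: the small epsilon below (1e-6) is presumably a tolerance against floating-point
        # drift when the elapsed time is an exact multiple of the update period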
self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period
# Update the buffers
# TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!?
# It is only for the contact sensor but there we should redefine the update function IMO.
if force_recompute or self._is_visualizing or (self.cfg.history_length > 0):
self._update_outdated_buffers()
"""
Implementation specific.
"""
@abstractmethod
def _initialize_impl(self):
"""Initializes the sensor-related handles and internal buffers."""
# Obtain Simulation Context
sim = sim_utils.SimulationContext.instance()
if sim is None:
raise RuntimeError("Simulation Context is not initialized!")
# Obtain device and backend
self._device = sim.device
self._backend = sim.backend
self._sim_physics_dt = sim.get_physics_dt()
# Count number of environments
env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0]
self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr)
self._num_envs = len(self._parent_prims)
# Boolean tensor indicating whether the sensor data has to be refreshed
self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device)
# Current timestamp (in seconds)
self._timestamp = torch.zeros(self._num_envs, device=self._device)
# Timestamp from last update
self._timestamp_last_update = torch.zeros_like(self._timestamp)
@abstractmethod
def _update_buffers_impl(self, env_ids: Sequence[int]):
"""Fills the sensor data for provided environment ids.
This function does not perform any time-based checks and directly fills the data into the
data container.
Args:
env_ids: The indices of the sensors that are ready to capture.
"""
raise NotImplementedError
def _set_debug_vis_impl(self, debug_vis: bool):
"""Set debug visualization into visualization objects.
This function is responsible for creating the visualization objects if they don't exist
and input ``debug_vis`` is True. If the visualization objects exist, the function should
set their visibility into the stage.
"""
raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.")
def _debug_vis_callback(self, event):
"""Callback for debug visualization.
This function calls the visualization objects and sets the data to visualize into them.
"""
raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.")
"""
Internal simulation callbacks.
"""
def _initialize_callback(self, event):
"""Initializes the scene elements.
Note:
PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be
called whenever the simulator "plays" from a "stop" state.
"""
if not self._is_initialized:
self._initialize_impl()
self._is_initialized = True
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
self._is_initialized = False
"""
Helper functions.
"""
def _update_outdated_buffers(self):
"""Fills the sensor data for the outdated sensors."""
outdated_env_ids = self._is_outdated.nonzero().squeeze(-1)
if len(outdated_env_ids) > 0:
# obtain new data
self._update_buffers_impl(outdated_env_ids)
# update the timestamp from last update
self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids]
# set outdated flag to false for the updated sensors
self._is_outdated[outdated_env_ids] = False
| 11,058 | Python | 37.940141 | 118 | 0.64433 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, ClassVar, Literal
import omni.physics.tensors.impl.api as physx
from omni.isaac.core.prims import XFormPrimView
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.sensors.camera import CameraData
from omni.isaac.orbit.sensors.camera.utils import convert_orientation_convention, create_rotation_matrix_from_view
from omni.isaac.orbit.utils.warp import raycast_mesh
from .ray_caster import RayCaster
if TYPE_CHECKING:
from .ray_caster_camera_cfg import RayCasterCameraCfg
class RayCasterCamera(RayCaster):
"""A ray-casting camera sensor.
The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are
defined in the sensor's local coordinate frame. The sensor has the same interface as the
:class:`omni.isaac.orbit.sensors.Camera` that implements the camera class through USD camera prims.
    However, this class provides faster image generation. The sensor converts meshes from the list of
primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these
Warp meshes only.
Currently, only the following annotators are supported:
- ``"distance_to_camera"``: An image containing the distance to camera optical center.
- ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis.
- ``"normals"``: An image containing the local surface normal vectors at each pixel.
.. note::
Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes
is a work in progress.
"""
cfg: RayCasterCameraCfg
"""The configuration parameters."""
UNSUPPORTED_TYPES: ClassVar[set[str]] = {
"rgb",
"instance_id_segmentation",
"instance_id_segmentation_fast",
"instance_segmentation",
"instance_segmentation_fast",
"semantic_segmentation",
"skeleton_data",
"motion_vectors",
"bounding_box_2d_tight",
"bounding_box_2d_tight_fast",
"bounding_box_2d_loose",
"bounding_box_2d_loose_fast",
"bounding_box_3d",
"bounding_box_3d_fast",
}
"""A set of sensor types that are not supported by the ray-caster camera."""
def __init__(self, cfg: RayCasterCameraCfg):
"""Initializes the camera object.
Args:
cfg: The configuration parameters.
Raises:
ValueError: If the provided data types are not supported by the ray-caster camera.
"""
# perform check on supported data types
self._check_supported_data_types(cfg)
# initialize base class
super().__init__(cfg)
# create empty variables for storing output data
self._data = CameraData()
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
return (
f"Ray-Caster-Camera @ '{self.cfg.prim_path}': \n"
f"\tview type : {self._view.__class__}\n"
f"\tupdate period (s) : {self.cfg.update_period}\n"
f"\tnumber of meshes : {len(RayCaster.meshes)}\n"
f"\tnumber of sensors : {self._view.count}\n"
f"\tnumber of rays/sensor: {self.num_rays}\n"
f"\ttotal number of rays : {self.num_rays * self._view.count}\n"
f"\timage shape : {self.image_shape}"
)
"""
Properties
"""
@property
def data(self) -> CameraData:
# update sensors if needed
self._update_outdated_buffers()
# return the data
return self._data
@property
def image_shape(self) -> tuple[int, int]:
"""A tuple containing (height, width) of the camera sensor."""
return (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width)
@property
def frame(self) -> torch.tensor:
"""Frame number when the measurement took place."""
return self._frame
"""
Operations.
"""
def set_intrinsic_matrices(
self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None
):
"""Set the intrinsic matrix of the camera.
Args:
matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3).
focal_length: Focal length to use when computing aperture values. Defaults to 1.0.
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
"""
# resolve env_ids
if env_ids is None:
env_ids = slice(None)
# save new intrinsic matrices and focal length
self._data.intrinsic_matrices[env_ids] = matrices.to(self._device)
self._focal_length = focal_length
# recompute ray directions
self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func(
self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device
)
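        # note: regenerating the pattern keeps the cached per-pixel rays consistent with the
        # newly set intrinsics (the pattern function maps the intrinsic matrix to ray directions)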
def reset(self, env_ids: Sequence[int] | None = None):
# reset the timestamps
super().reset(env_ids)
# resolve None
if env_ids is None:
env_ids = slice(None)
# reset the data
# note: this recomputation is useful if one performs events such as randomizations on the camera poses.
pos_w, quat_w = self._compute_camera_world_poses(env_ids)
self._data.pos_w[env_ids] = pos_w
self._data.quat_w_world[env_ids] = quat_w
# Reset the frame count
self._frame[env_ids] = 0
def set_world_poses(
self,
positions: torch.Tensor | None = None,
orientations: torch.Tensor | None = None,
env_ids: Sequence[int] | None = None,
convention: Literal["opengl", "ros", "world"] = "ros",
):
"""Set the pose of the camera w.r.t. the world frame using specified convention.
Since different fields use different conventions for camera orientations, the method allows users to
set the camera poses in the specified convention. Possible conventions are:
- :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
- :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
- :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention
See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details
on the conventions.
Args:
positions: The cartesian coordinates (in meters). Shape is (N, 3).
                Defaults to None, in which case the camera position is not changed.
            orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
                Defaults to None, in which case the camera orientation is not changed.
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
convention: The convention in which the poses are fed. Defaults to "ros".
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# get current positions
pos_w, quat_w = self._compute_view_world_poses(env_ids)
if positions is not None:
# transform to camera frame
pos_offset_world_frame = positions - pos_w
self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame)
if orientations is not None:
# convert rotation matrix from input convention to world
quat_w_set = convert_orientation_convention(orientations, origin=convention, target="world")
self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set)
# update the data
pos_w, quat_w = self._compute_camera_world_poses(env_ids)
self._data.pos_w[env_ids] = pos_w
self._data.quat_w_world[env_ids] = quat_w
def set_world_poses_from_view(
self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
):
"""Set the poses of the camera from the eye position and look-at target position.
Args:
            eyes: The positions of the camera's eye. Shape is (N, 3).
            targets: The target locations to look at. Shape is (N, 3).
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
NotImplementedError: If the stage up-axis is not "Y" or "Z".
"""
# camera position and rotation in opengl convention
orientations = math_utils.quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device))
self.set_world_poses(eyes, orientations, env_ids, convention="opengl")
"""
Implementation.
"""
def _initialize_rays_impl(self):
# Create all indices buffer
self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long)
# Create frame count buffer
self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long)
# create buffers
self._create_buffers()
# compute intrinsic matrices
self._compute_intrinsic_matrices()
# compute ray stars and directions
self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(
self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device
)
self.num_rays = self.ray_directions.shape[1]
# create buffer to store ray hits
self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device)
# set offsets
quat_w = convert_orientation_convention(
torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world"
)
self._offset_quat = quat_w.repeat(self._view.count, 1)
self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1)
def _update_buffers_impl(self, env_ids: Sequence[int]):
"""Fills the buffers of the sensor data."""
# increment frame count
self._frame[env_ids] += 1
# compute poses from current view
pos_w, quat_w = self._compute_camera_world_poses(env_ids)
# update the data
self._data.pos_w[env_ids] = pos_w
self._data.quat_w_world[env_ids] = quat_w
# note: full orientation is considered
ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids])
ray_starts_w += pos_w.unsqueeze(1)
ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids])
# ray cast and store the hits
# TODO: Make ray-casting work for multiple meshes?
self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh(
ray_starts_w,
ray_directions_w,
mesh=RayCasterCamera.meshes[self.cfg.mesh_prim_paths[0]],
max_dist=self.cfg.max_distance,
return_distance=any(
[name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]]
),
return_normal="normals" in self.cfg.data_types,
)
# update output buffers
if "distance_to_image_plane" in self.cfg.data_types:
            # note: the depth vector is rotated into the camera frame given in "world" convention
            # (forward is +X), so the distance along the optical axis is its first component
distance_to_image_plane = (
math_utils.quat_apply(
math_utils.quat_inv(quat_w).repeat(1, self.num_rays),
(ray_depth[:, :, None] * ray_directions_w),
)
)[:, :, 0]
self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view(-1, *self.image_shape)
if "distance_to_camera" in self.cfg.data_types:
self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape)
if "normals" in self.cfg.data_types:
self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3)
def _debug_vis_callback(self, event):
# in case it crashes be safe
if not hasattr(self, "ray_hits_w"):
return
# show ray hit positions
self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3))
"""
Private Helpers
"""
def _check_supported_data_types(self, cfg: RayCasterCameraCfg):
"""Checks if the data types are supported by the ray-caster camera."""
# check if there is any intersection in unsupported types
# reason: we cannot obtain this data from simplified warp-based ray caster
common_elements = set(cfg.data_types) & RayCasterCamera.UNSUPPORTED_TYPES
if common_elements:
raise ValueError(
f"RayCasterCamera class does not support the following sensor types: {common_elements}."
"\n\tThis is because these sensor types cannot be obtained in a fast way using ''warp''."
"\n\tHint: If you need to work with these sensor types, we recommend using the USD camera"
" interface from the omni.isaac.orbit.sensors.camera module."
)
def _create_buffers(self):
"""Create buffers for storing data."""
# prepare drift
self.drift = torch.zeros(self._view.count, 3, device=self.device)
# create the data object
# -- pose of the cameras
self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device)
self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device)
# -- intrinsic matrix
self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device)
self._data.intrinsic_matrices[:, 2, 2] = 1.0
self._data.image_shape = self.image_shape
# -- output data
# create the buffers to store the annotator data.
self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device)
self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count
for name in self.cfg.data_types:
if name in ["distance_to_image_plane", "distance_to_camera"]:
shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width)
elif name in ["normals"]:
shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 3)
else:
raise ValueError(f"Received unknown data type: {name}. Please check the configuration.")
# allocate tensor to store the data
self._data.output[name] = torch.zeros((self._view.count, *shape), device=self._device)
def _compute_intrinsic_matrices(self):
"""Computes the intrinsic matrices for the camera based on the config provided."""
# get the sensor properties
pattern_cfg = self.cfg.pattern_cfg
# compute the intrinsic matrix
vertical_aperture = pattern_cfg.horizontal_aperture * pattern_cfg.height / pattern_cfg.width
f_x = pattern_cfg.width * pattern_cfg.focal_length / pattern_cfg.horizontal_aperture
f_y = pattern_cfg.height * pattern_cfg.focal_length / vertical_aperture
c_x = pattern_cfg.horizontal_aperture_offset * f_x + pattern_cfg.width / 2
c_y = pattern_cfg.vertical_aperture_offset * f_y + pattern_cfg.height / 2
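        # the resulting pinhole intrinsic matrix has the standard form:
        #     K = [[f_x,  0 , c_x],
        #          [ 0 , f_y, c_y],
        #          [ 0 ,  0 ,  1 ]]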
# allocate the intrinsic matrices
self._data.intrinsic_matrices[:, 0, 0] = f_x
self._data.intrinsic_matrices[:, 0, 2] = c_x
self._data.intrinsic_matrices[:, 1, 1] = f_y
self._data.intrinsic_matrices[:, 1, 2] = c_y
# save focal length
self._focal_length = pattern_cfg.focal_length
def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]:
"""Obtains the pose of the view the camera is attached to in the world frame.
Returns:
A tuple of the position (in meters) and quaternion (w, x, y, z).
"""
# obtain the poses of the sensors
# note: clone arg doesn't exist for xform prim view so we need to do this manually
if isinstance(self._view, XFormPrimView):
pos_w, quat_w = self._view.get_world_poses(env_ids)
elif isinstance(self._view, physx.ArticulationView):
pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1)
quat_w = math_utils.convert_quat(quat_w, to="wxyz")
elif isinstance(self._view, physx.RigidBodyView):
pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1)
quat_w = math_utils.convert_quat(quat_w, to="wxyz")
else:
raise RuntimeError(f"Unsupported view type: {type(self._view)}")
# return the pose
return pos_w.clone(), quat_w.clone()
def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]:
"""Computes the pose of the camera in the world frame.
This function applies the offset pose to the pose of the view the camera is attached to.
Returns:
A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention.
"""
# get the pose of the view the camera is attached to
pos_w, quat_w = self._compute_view_world_poses(env_ids)
# apply offsets
# need to apply quat because offset relative to parent frame
pos_w += math_utils.quat_apply(quat_w, self._offset_pos[env_ids])
quat_w = math_utils.quat_mul(quat_w, self._offset_quat[env_ids])
return pos_w, quat_w
| 18,416 | Python | 45.0425 | 120 | 0.627987 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_data.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from dataclasses import dataclass
@dataclass
class RayCasterData:
"""Data container for the ray-cast sensor."""
pos_w: torch.Tensor = None
"""Position of the sensor origin in world frame.
Shape is (N, 3), where N is the number of sensors.
"""
quat_w: torch.Tensor = None
"""Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.
Shape is (N, 4), where N is the number of sensors.
"""
ray_hits_w: torch.Tensor = None
"""The ray hit positions in the world frame.
Shape is (N, B, 3), where N is the number of sensors, B is the number of rays
in the scan pattern per sensor.
"""
| 794 | Python | 25.499999 | 82 | 0.664987 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for Warp-based ray-cast sensor."""
from . import patterns
from .ray_caster import RayCaster
from .ray_caster_camera import RayCasterCamera
from .ray_caster_camera_cfg import RayCasterCameraCfg
from .ray_caster_cfg import RayCasterCfg
from .ray_caster_data import RayCasterData
| 415 | Python | 28.714284 | 56 | 0.787952 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import numpy as np
import re
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING, ClassVar
import carb
import omni.physics.tensors.impl.api as physx
import warp as wp
from omni.isaac.core.prims import XFormPrimView
from pxr import UsdGeom, UsdPhysics
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.terrains.trimesh.utils import make_plane
from omni.isaac.orbit.utils.math import convert_quat, quat_apply, quat_apply_yaw
from omni.isaac.orbit.utils.warp import convert_to_warp_mesh, raycast_mesh
from ..sensor_base import SensorBase
from .ray_caster_data import RayCasterData
if TYPE_CHECKING:
from .ray_caster_cfg import RayCasterCfg
class RayCaster(SensorBase):
"""A ray-casting sensor.
The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are
defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against
a set of meshes with a given ray pattern.
The meshes are parsed from the list of primitive paths provided in the configuration. These are then
converted to warp meshes and stored in the `warp_meshes` list. The ray-caster then ray-casts against
these warp meshes using the ray pattern provided in the configuration.
.. note::
Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes
is a work in progress.
"""
cfg: RayCasterCfg
"""The configuration parameters."""
meshes: ClassVar[dict[str, wp.Mesh]] = {}
"""The warp meshes available for raycasting.
The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.
Note:
We store a global dictionary of all warp meshes to prevent re-loading the mesh for different ray-cast sensor instances.
"""
def __init__(self, cfg: RayCasterCfg):
"""Initializes the ray-caster object.
Args:
cfg: The configuration parameters.
"""
# check if sensor path is valid
# note: currently we do not handle environment indices if there is a regex pattern in the leaf
# For example, if the prim path is "/World/Sensor_[1,2]".
sensor_path = cfg.prim_path.split("/")[-1]
sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None
if sensor_path_is_regex:
raise RuntimeError(
f"Invalid prim path for the ray-caster sensor: {self.cfg.prim_path}."
"\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf."
)
# Initialize base class
super().__init__(cfg)
# Create empty variables for storing output data
self._data = RayCasterData()
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
return (
f"Ray-caster @ '{self.cfg.prim_path}': \n"
f"\tview type : {self._view.__class__}\n"
f"\tupdate period (s) : {self.cfg.update_period}\n"
f"\tnumber of meshes : {len(RayCaster.meshes)}\n"
f"\tnumber of sensors : {self._view.count}\n"
f"\tnumber of rays/sensor: {self.num_rays}\n"
f"\ttotal number of rays : {self.num_rays * self._view.count}"
)
"""
Properties
"""
@property
def num_instances(self) -> int:
return self._view.count
@property
def data(self) -> RayCasterData:
# update sensors if needed
self._update_outdated_buffers()
# return the data
return self._data
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None):
# reset the timers and counters
super().reset(env_ids)
# resolve None
if env_ids is None:
env_ids = slice(None)
# resample the drift
self.drift[env_ids].uniform_(*self.cfg.drift_range)
"""
Implementation.
"""
def _initialize_impl(self):
super()._initialize_impl()
# create simulation view
self._physics_sim_view = physx.create_simulation_view(self._backend)
self._physics_sim_view.set_subspace_roots("/")
# check if the prim at path is an articulated or rigid prim
# we do this since for physics-based view classes we can access their data directly
# otherwise we need to use the xform view class which is slower
found_supported_prim_class = False
prim = sim_utils.find_first_matching_prim(self.cfg.prim_path)
if prim is None:
raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}")
# create view based on the type of prim
if prim.HasAPI(UsdPhysics.ArticulationRootAPI):
self._view = self._physics_sim_view.create_articulation_view(self.cfg.prim_path.replace(".*", "*"))
found_supported_prim_class = True
elif prim.HasAPI(UsdPhysics.RigidBodyAPI):
self._view = self._physics_sim_view.create_rigid_body_view(self.cfg.prim_path.replace(".*", "*"))
found_supported_prim_class = True
else:
self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False)
found_supported_prim_class = True
carb.log_warn(f"The prim at path {prim.GetPath().pathString} is not a physics prim! Using XFormPrimView.")
# check if prim view class is found
if not found_supported_prim_class:
raise RuntimeError(f"Failed to find a valid prim view class for the prim paths: {self.cfg.prim_path}")
# load the meshes by parsing the stage
self._initialize_warp_meshes()
# initialize the ray start and directions
self._initialize_rays_impl()
def _initialize_warp_meshes(self):
# check number of mesh prims provided
if len(self.cfg.mesh_prim_paths) != 1:
raise NotImplementedError(
f"RayCaster currently only supports one mesh prim. Received: {len(self.cfg.mesh_prim_paths)}"
)
# read prims to ray-cast
for mesh_prim_path in self.cfg.mesh_prim_paths:
# check if mesh already casted into warp mesh
if mesh_prim_path in RayCaster.meshes:
continue
# check if the prim is a plane - handle PhysX plane as a special case
# if a plane exists then we need to create an infinite mesh that is a plane
mesh_prim = sim_utils.get_first_matching_child_prim(
mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane"
)
# if we did not find a plane then we need to read the mesh
if mesh_prim is None:
# obtain the mesh prim
mesh_prim = sim_utils.get_first_matching_child_prim(
mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh"
)
# check if valid
if mesh_prim is None or not mesh_prim.IsValid():
raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}")
# cast into UsdGeomMesh
mesh_prim = UsdGeom.Mesh(mesh_prim)
# read the vertices and faces
points = np.asarray(mesh_prim.GetPointsAttr().Get())
indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get())
wp_mesh = convert_to_warp_mesh(points, indices, device=self.device)
# print info
carb.log_info(
f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces."
)
else:
mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True)
wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device)
# print info
carb.log_info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.")
# add the warp mesh to the list
RayCaster.meshes[mesh_prim_path] = wp_mesh
# throw an error if no meshes are found
if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]):
raise RuntimeError(
f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}"
)
def _initialize_rays_impl(self):
# compute ray stars and directions
self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device)
self.num_rays = len(self.ray_directions)
# apply offset transformation to the rays
offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device)
offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device)
self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions)
self.ray_starts += offset_pos
# repeat the rays for each sensor
self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1)
self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1)
# prepare drift
self.drift = torch.zeros(self._view.count, 3, device=self.device)
# fill the data buffer
self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device)
self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device)
self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device)
def _update_buffers_impl(self, env_ids: Sequence[int]):
"""Fills the buffers of the sensor data."""
# obtain the poses of the sensors
if isinstance(self._view, XFormPrimView):
pos_w, quat_w = self._view.get_world_poses(env_ids)
elif isinstance(self._view, physx.ArticulationView):
pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1)
quat_w = convert_quat(quat_w, to="wxyz")
elif isinstance(self._view, physx.RigidBodyView):
pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1)
quat_w = convert_quat(quat_w, to="wxyz")
else:
raise RuntimeError(f"Unsupported view type: {type(self._view)}")
# note: we clone here because the tensors obtained from the view are read-only
pos_w = pos_w.clone()
quat_w = quat_w.clone()
# apply drift
pos_w += self.drift[env_ids]
# store the poses
self._data.pos_w[env_ids] = pos_w
self._data.quat_w[env_ids] = quat_w
# ray cast based on the sensor poses
if self.cfg.attach_yaw_only:
# only yaw orientation is considered and directions are not rotated
ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids])
ray_starts_w += pos_w.unsqueeze(1)
ray_directions_w = self.ray_directions[env_ids]
else:
# full orientation is considered
ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids])
ray_starts_w += pos_w.unsqueeze(1)
ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids])
# ray cast and store the hits
# TODO: Make this work for multiple meshes?
self._data.ray_hits_w[env_ids] = raycast_mesh(
ray_starts_w,
ray_directions_w,
max_dist=self.cfg.max_distance,
mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]],
)[0]
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
if not hasattr(self, "ray_visualizer"):
self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
# set their visibility to true
self.ray_visualizer.set_visibility(True)
else:
if hasattr(self, "ray_visualizer"):
self.ray_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# show ray hit positions
self.ray_visualizer.visualize(self._data.ray_hits_w.view(-1, 3))
"""
Internal simulation callbacks.
"""
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
# call parent
super()._invalidate_initialize_callback(event)
# set all existing views to None to invalidate them
self._physics_sim_view = None
self._view = None
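# Example (illustrative sketch, hedged): a height-scanner configuration for this
# sensor. The prim paths and pattern values below are assumptions for
# demonstration purposes only and are not part of the original API.
def _example_height_scanner_cfg() -> RayCasterCfg:
    """Return a sample ray-caster configuration for scanning terrain height."""
    # imported lazily to avoid a circular import at module load time
    from .patterns import GridPatternCfg
    from .ray_caster_cfg import RayCasterCfg

    return RayCasterCfg(
        prim_path="/World/envs/env_.*/Robot/base",  # assumed robot base prim
        mesh_prim_paths=["/World/ground"],  # assumed static terrain prim
        attach_yaw_only=True,  # height maps only need yaw tracking
        pattern_cfg=GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
        max_distance=10.0,
    )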
| 12,994 | Python | 42.902027 | 130 | 0.621133 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the ray-cast camera sensor."""
from typing import Literal
from omni.isaac.orbit.utils import configclass
from .ray_caster_camera import RayCasterCamera
from .ray_caster_cfg import RayCasterCfg
@configclass
class RayCasterCameraCfg(RayCasterCfg):
"""Configuration for the ray-cast sensor."""
@configclass
class OffsetCfg:
"""The offset pose of the sensor's frame from the sensor's parent frame."""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
convention: Literal["opengl", "ros", "world"] = "ros"
"""The convention in which the frame offset is applied. Defaults to "ros".
- ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention.
- ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention.
- ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention.
"""
class_type: type = RayCasterCamera
offset: OffsetCfg = OffsetCfg()
"""The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity."""
data_types: list[str] = ["distance_to_image_plane"]
"""List of sensor names/types to enable for the camera. Defaults to ["distance_to_image_plane"]."""
def __post_init__(self):
# for cameras, this quantity should be False always.
self.attach_yaw_only = False
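# Example (illustrative sketch, hedged): a minimal depth-camera style
# configuration. The prim path and image size below are assumptions for
# demonstration only.
#
#   from omni.isaac.orbit.sensors.ray_caster.patterns import PinholeCameraPatternCfg
#
#   camera_cfg = RayCasterCameraCfg(
#       prim_path="/World/envs/env_.*/Robot/head",  # assumed mount prim
#       mesh_prim_paths=["/World/ground"],          # assumed terrain prim
#       pattern_cfg=PinholeCameraPatternCfg(width=320, height=240),
#       data_types=["distance_to_image_plane", "normals"],
#       offset=RayCasterCameraCfg.OffsetCfg(pos=(0.1, 0.0, 0.0), convention="ros"),
#   )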
| 1,856 | Python | 36.139999 | 122 | 0.635776 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the ray-cast sensor."""
from dataclasses import MISSING
from omni.isaac.orbit.markers import VisualizationMarkersCfg
from omni.isaac.orbit.markers.config import RAY_CASTER_MARKER_CFG
from omni.isaac.orbit.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .patterns.patterns_cfg import PatternBaseCfg
from .ray_caster import RayCaster
@configclass
class RayCasterCfg(SensorBaseCfg):
"""Configuration for the ray-cast sensor."""
@configclass
class OffsetCfg:
"""The offset pose of the sensor's frame from the sensor's parent frame."""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
class_type: type = RayCaster
mesh_prim_paths: list[str] = MISSING
"""The list of mesh primitive paths to ray cast against.
Note:
Currently, only a single static mesh is supported. We are working on supporting multiple
static meshes and dynamic meshes.
"""
offset: OffsetCfg = OffsetCfg()
"""The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity."""
attach_yaw_only: bool = MISSING
"""Whether the rays' starting positions and directions only track the yaw orientation.
This is useful for ray-casting height maps, where only yaw rotation is needed.
"""
pattern_cfg: PatternBaseCfg = MISSING
"""The pattern that defines the local ray starting positions and directions."""
max_distance: float = 1e6
"""Maximum distance (in meters) from the sensor to ray cast to. Defaults to 1e6."""
drift_range: tuple[float, float] = (0.0, 0.0)
"""The range of drift (in meters) to add to the ray starting positions (xyz). Defaults to (0.0, 0.0).
For floating base robots, this is useful for simulating drift in the robot's pose estimation.
"""
visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster")
"""The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG.
Note:
This attribute is only used when debug visualization is enabled.
"""
| 2,506 | Python | 34.814285 | 107 | 0.695531 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from . import patterns_cfg
def grid_pattern(cfg: patterns_cfg.GridPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]:
"""A regular grid pattern for ray casting.
The grid pattern is made from rays that are parallel to each other. They span a 2D grid in the sensor's
local coordinates from ``(-length/2, -width/2)`` to ``(length/2, width/2)``, which is defined
by the ``size = (length, width)`` and ``resolution`` parameters in the config.
Args:
cfg: The configuration instance for the pattern.
device: The device to create the pattern on.
Returns:
The starting positions and directions of the rays.
Raises:
ValueError: If the ordering is not "xy" or "yx".
ValueError: If the resolution is less than or equal to 0.
"""
# check valid arguments
if cfg.ordering not in ["xy", "yx"]:
raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{cfg.ordering}'.")
if cfg.resolution <= 0:
raise ValueError(f"Resolution must be greater than 0. Received: '{cfg.resolution}'.")
# resolve mesh grid indexing (note: torch meshgrid is different from numpy meshgrid)
# check: https://github.com/pytorch/pytorch/issues/15301
indexing = cfg.ordering if cfg.ordering == "xy" else "ij"
# define grid pattern
x = torch.arange(start=-cfg.size[0] / 2, end=cfg.size[0] / 2 + 1.0e-9, step=cfg.resolution, device=device)
y = torch.arange(start=-cfg.size[1] / 2, end=cfg.size[1] / 2 + 1.0e-9, step=cfg.resolution, device=device)
grid_x, grid_y = torch.meshgrid(x, y, indexing=indexing)
# store into ray starts
num_rays = grid_x.numel()
ray_starts = torch.zeros(num_rays, 3, device=device)
ray_starts[:, 0] = grid_x.flatten()
ray_starts[:, 1] = grid_y.flatten()
# define ray-cast directions
ray_directions = torch.zeros_like(ray_starts)
ray_directions[..., :] = torch.tensor(list(cfg.direction), device=device)
return ray_starts, ray_directions
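# Example (worked, hedged): for size=(1.0, 0.6) and resolution=0.2, the arange
# calls above produce 6 x-samples and 4 y-samples (both end points included via
# the 1e-9 epsilon), i.e. 6 * 4 = 24 rays, all pointing along cfg.direction
# (default (0.0, 0.0, -1.0)).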
def pinhole_camera_pattern(
cfg: patterns_cfg.PinholeCameraPatternCfg, intrinsic_matrices: torch.Tensor, device: str
) -> tuple[torch.Tensor, torch.Tensor]:
"""The image pattern for ray casting.
.. caution::
This function does not follow the standard pattern interface. It requires the intrinsic matrices
of the cameras to be passed in. This is because we want to be able to randomize the intrinsic
matrices of the cameras, which is not possible with the standard pattern interface.
Args:
cfg: The configuration instance for the pattern.
intrinsic_matrices: The intrinsic matrices of the cameras. Shape is (N, 3, 3).
device: The device to create the pattern on.
Returns:
The starting positions and directions of the rays. Both tensors have shape
(N, H * W, 3).
"""
# get image plane mesh grid
grid = torch.meshgrid(
torch.arange(start=0, end=cfg.width, dtype=torch.int32, device=device),
torch.arange(start=0, end=cfg.height, dtype=torch.int32, device=device),
indexing="xy",
)
pixels = torch.vstack(list(map(torch.ravel, grid))).T
# convert to homogeneous coordinate system
pixels = torch.hstack([pixels, torch.ones((len(pixels), 1), device=device)])
# get pixel coordinates in camera frame
pix_in_cam_frame = torch.matmul(torch.inverse(intrinsic_matrices), pixels.T)
# robotics camera frame is (x forward, y left, z up) from camera frame with (x right, y down, z forward)
# transform to robotics camera frame
transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2)
pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec
# normalize ray directions
ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1)
# for camera, we always ray-cast from the sensor's origin
ray_starts = torch.zeros_like(ray_directions, device=device)
return ray_starts, ray_directions
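# Example (illustrative, hedged): for a single camera with an assumed 640x480
# image and intrinsics f_x = f_y = 500, c_x = 320, c_y = 240, the function above
# returns tensors of shape (1, 307200, 3). The center pixel maps to the
# +X (forward) axis of the robotics camera frame:
#
#   K = torch.tensor([[[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]])
#   ray_starts, ray_dirs = pinhole_camera_pattern(cfg, K, "cpu")  # cfg assumes width=640, height=480
#   # ray_dirs[0, 240 * 640 + 320] ~= (1.0, 0.0, 0.0)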
def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]:
"""The RS-Bpearl pattern for ray casting.
The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide
field of view. It is designed for near-field blind-spots detection.
.. _Robosense RS-Bpearl: https://www.roscomponents.com/en/lidar-laser-scanner/267-rs-bpearl.html
Args:
cfg: The configuration instance for the pattern.
device: The device to create the pattern on.
Returns:
The starting positions and directions of the rays.
"""
h = torch.arange(-cfg.horizontal_fov / 2, cfg.horizontal_fov / 2, cfg.horizontal_res, device=device)
v = torch.tensor(list(cfg.vertical_ray_angles), device=device)
pitch, yaw = torch.meshgrid(v, h, indexing="xy")
pitch, yaw = torch.deg2rad(pitch.reshape(-1)), torch.deg2rad(yaw.reshape(-1))
pitch += torch.pi / 2
x = torch.sin(pitch) * torch.cos(yaw)
y = torch.sin(pitch) * torch.sin(yaw)
z = torch.cos(pitch)
ray_directions = -torch.stack([x, y, z], dim=1)
ray_starts = torch.zeros_like(ray_directions)
return ray_starts, ray_directions
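# Example (worked count, hedged): with the defaults (a 360 degree horizontal FOV
# sampled at 10 degree resolution, i.e. 36 azimuths, and 32 vertical ray
# angles), the meshgrid above yields 36 * 32 = 1152 rays per sensor.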
| 5,519 | Python | 41.137404 | 110 | 0.67784 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for ray-casting patterns used by the ray-caster."""
from .patterns import bpearl_pattern, grid_pattern, pinhole_camera_pattern
from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg
| 365 | Python | 35.599996 | 99 | 0.791781 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the ray-cast sensor."""
from __future__ import annotations
import torch
from collections.abc import Callable, Sequence
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.utils import configclass
from . import patterns
@configclass
class PatternBaseCfg:
"""Base configuration for a pattern."""
func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING
"""Function to generate the pattern.
The function should take in the configuration and the device name as arguments. It should return
the pattern's starting positions and directions as a tuple of torch.Tensor.
"""
@configclass
class GridPatternCfg(PatternBaseCfg):
"""Configuration for the grid pattern for ray-casting.
Defines a 2D grid of rays in the coordinates of the sensor.
.. attention::
The points are ordered based on the :attr:`ordering` attribute.
"""
func: Callable = patterns.grid_pattern
resolution: float = MISSING
"""Grid resolution (in meters)."""
size: tuple[float, float] = MISSING
"""Grid size (length, width) (in meters)."""
direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
"""Ray direction. Defaults to (0.0, 0.0, -1.0)."""
ordering: Literal["xy", "yx"] = "xy"
"""Specifies the ordering of points in the generated grid. Defaults to ``"xy"``.
Consider a grid pattern with points at :math:`(x, y)` where :math:`x` and :math:`y` are the grid indices.
The ordering of the points can be specified as "xy" or "yx". This determines the outer and inner loop order
when iterating over the grid points.
* If *"xy"* is selected, the points are ordered with outer loop over "x" and inner loop over "y".
* If *"yx"* is selected, the points are ordered with outer loop over "y" and inner loop over "x".
For example, the grid pattern points with :math:`X = (0, 1, 2)` and :math:`Y = (3, 4)`:
* *"xy"* ordering: :math:`[(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)]`
* *"yx"* ordering: :math:`[(0, 3), (1, 3), (2, 3), (1, 4), (2, 4), (2, 4)]`
"""
@configclass
class PinholeCameraPatternCfg(PatternBaseCfg):
"""Configuration for a pinhole camera depth image pattern for ray-casting."""
func: Callable = patterns.pinhole_camera_pattern
focal_length: float = 24.0
"""Perspective focal length (in cm). Defaults to 24.0cm.
Longer focal lengths produce a narrower FOV; shorter focal lengths produce a wider FOV.
"""
horizontal_aperture: float = 20.955
"""Horizontal aperture (in mm). Defaults to 20.955mm.
Emulates sensor/film width on a camera.
Note:
The default value is the horizontal aperture of a 35 mm spherical projector.
"""
horizontal_aperture_offset: float = 0.0
"""Offsets Resolution/Film gate horizontally. Defaults to 0.0."""
vertical_aperture_offset: float = 0.0
"""Offsets Resolution/Film gate vertically. Defaults to 0.0."""
width: int = MISSING
"""Width of the image (in pixels)."""
height: int = MISSING
"""Height of the image (in pixels)."""
@configclass
class BpearlPatternCfg(PatternBaseCfg):
"""Configuration for the Bpearl pattern for ray-casting."""
func: Callable = patterns.bpearl_pattern
horizontal_fov: float = 360.0
"""Horizontal field of view (in degrees). Defaults to 360.0."""
horizontal_res: float = 10.0
"""Horizontal resolution (in degrees). Defaults to 10.0."""
# fmt: off
vertical_ray_angles: Sequence[float] = [
89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375,
58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375,
27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125
]
# fmt: on
"""Vertical ray angles (in degrees). Defaults to a list of 32 angles.
Note:
We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles
are not evenly spaced.
"""
| 4,172 | Python | 32.384 | 111 | 0.659396 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import carb
import omni.physics.tensors.impl.api as physx
from pxr import UsdPhysics
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.utils.math import (
combine_frame_transforms,
convert_quat,
is_identity_pose,
subtract_frame_transforms,
)
from ..sensor_base import SensorBase
from .frame_transformer_data import FrameTransformerData
if TYPE_CHECKING:
from .frame_transformer_cfg import FrameTransformerCfg
class FrameTransformer(SensorBase):
"""A sensor for reporting frame transforms.
This class provides an interface for reporting the transform of one or more frames (target frames)
with respect to another frame (source frame). The source frame is specified by the user as a prim path
(:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of
prim paths (:attr:`FrameTransformerCfg.target_frames`).
The source frame and target frames are assumed to be rigid bodies. The transform of the target frames
with respect to the source frame is computed by first extracting the transform of the source frame
and target frames from the physics engine and then computing the relative transform between the two.
Additionally, the user can specify an offset for the source frame and each target frame. This is useful
for specifying the transform of the desired frame with respect to the body's center of mass, for instance.
A common example of using this sensor is to track the position and orientation of the end effector of a
robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the
manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is
typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the
manipulator.
.. note::
Currently, this implementation only handles frames within an articulation. This is because the frame
regex expressions are resolved based on their parent prim path. This can be extended to handle
frames outside of articulation by using the frame prim path instead. However, this would require
additional checks to ensure that the user-specified frames are valid which is not currently implemented.
.. warning::
The implementation assumes that the parent body of a target frame is not the same as that
of the source frame (i.e. :attr:`FrameTransformerCfg.prim_path`). While a corner case, this can occur
if the user specifies the same prim path for both the source frame and target frame. In this case,
the target frame will be ignored and not reported. This is a limitation of the current implementation
and will be fixed in a future release.
"""
cfg: FrameTransformerCfg
"""The configuration parameters."""
def __init__(self, cfg: FrameTransformerCfg):
"""Initializes the frame transformer object.
Args:
cfg: The configuration parameters.
"""
# initialize base class
super().__init__(cfg)
# Create empty variables for storing output data
self._data: FrameTransformerData = FrameTransformerData()
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
return (
f"FrameTransformer @ '{self.cfg.prim_path}': \n"
f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n"
f"\tnumber of envs: {self._num_envs}\n"
f"\tsource body frame: {self._source_frame_body_name}\n"
f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n"
)
"""
Properties
"""
@property
def data(self) -> FrameTransformerData:
# update sensors if needed
self._update_outdated_buffers()
# return the data
return self._data
"""
Operations
"""
def reset(self, env_ids: Sequence[int] | None = None):
# reset the timers and counters
super().reset(env_ids)
# resolve None
if env_ids is None:
env_ids = ...
"""
Implementation.
"""
def _initialize_impl(self):
super()._initialize_impl()
# resolve source frame offset
source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device)
source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device)
# Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is
# not the identity quaternion for efficiency in _update_buffer_impl
self._apply_source_frame_offset = True
# Handle source frame offsets
if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat):
carb.log_verbose(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}")
self._apply_source_frame_offset = False
else:
carb.log_verbose(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}")
# Store offsets as tensors (duplicating each env's offsets for ease of multiplication later)
self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1)
self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1)
# Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames
# based upon the same body name and we don't want to create unnecessary views
body_names_to_frames: dict[str, set[str]] = {}
# The offsets associated with each target frame
target_offsets: dict[str, dict[str, torch.Tensor]] = {}
# The frames whose offsets are not identity
non_identity_offset_frames: list[str] = []
# Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the
# rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl
self._apply_target_frame_offset = False
# Collect all target frames, their associated body prim paths and their offsets so that we can extract
# the prim, check that it has the appropriate rigid body API in a single loop.
# First element is None because user can't specify source frame name
frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames]
frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames]
# First element is None because source frame offset is handled separately
frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames]
for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets):
# Find correct prim
matching_prims = sim_utils.find_matching_prims(prim_path)
if len(matching_prims) == 0:
raise ValueError(
f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'."
" No matching prims were found."
)
for prim in matching_prims:
# Get the prim path of the matching prim
matching_prim_path = prim.GetPath().pathString
# Check if it is a rigid prim
if not prim.HasAPI(UsdPhysics.RigidBodyAPI):
raise ValueError(
f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a"
" rigid body. The class only supports transformations between rigid bodies."
)
# Get the name of the body
body_name = matching_prim_path.rsplit("/", 1)[-1]
# Use body name if frame isn't specified by user
frame_name = frame if frame is not None else body_name
# Keep track of which frames are associated with which bodies
if body_name in body_names_to_frames:
body_names_to_frames[body_name].add(frame_name)
else:
body_names_to_frames[body_name] = {frame_name}
if offset is not None:
offset_pos = torch.tensor(offset.pos, device=self.device)
offset_quat = torch.tensor(offset.rot, device=self.device)
# Check if we need to apply offsets (optimized code path in _update_buffer_impl)
if not is_identity_pose(offset_pos, offset_quat):
non_identity_offset_frames.append(frame_name)
self._apply_target_frame_offset = True
target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat}
if not self._apply_target_frame_offset:
carb.log_info(
f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all"
f" are identity: {frames[1:]}"
)
else:
carb.log_info(
f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:"
f" {non_identity_offset_frames}"
)
# The names of bodies that RigidPrimView will be tracking to later extract transforms from
tracked_body_names = list(body_names_to_frames.keys())
# Construct regex expression for the body names
body_names_regex = r"(" + "|".join(tracked_body_names) + r")"
body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}"
# Create simulation view
self._physics_sim_view = physx.create_simulation_view(self._backend)
self._physics_sim_view.set_subspace_roots("/")
# Create a prim view for all frames and initialize it
# order of transforms coming out of view will be source frame followed by target frame(s)
self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*"))
# Determine the order in which regex evaluated body names so we can later index into frame transforms
# by frame name correctly
all_prim_paths = self._frame_physx_view.prim_paths
# Only need first env as the names and their ordering are the same across environments
first_env_prim_paths = all_prim_paths[0 : len(tracked_body_names)]
first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths]
# Re-parse the list as it may have moved when resolving regex above
# -- source frame
self._source_frame_body_name = self.cfg.prim_path.split("/")[-1]
source_frame_index = first_env_body_names.index(self._source_frame_body_name)
# -- target frames
self._target_frame_body_names = first_env_body_names[:]
self._target_frame_body_names.remove(self._source_frame_body_name)
# Determine indices into all tracked body frames for both source and target frames
all_ids = torch.arange(self._num_envs * len(tracked_body_names))
self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index
self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)]
# The name of each of the target frame(s) - either user specified or defaulted to the body name
self._target_frame_names: list[str] = []
# The position and rotation components of target frame offsets
target_frame_offset_pos = []
target_frame_offset_quat = []
# Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed
# for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations
# when updating sensor in _update_buffers_impl
duplicate_frame_indices = []
# Go through each body name and determine the number of duplicates we need for that frame
# and extract the offsets. This is all done to handle the case where multiple frames
# reference the same body, but have different names and/or offsets
for i, body_name in enumerate(self._target_frame_body_names):
for frame in body_names_to_frames[body_name]:
target_frame_offset_pos.append(target_offsets[frame]["pos"])
target_frame_offset_quat.append(target_offsets[frame]["quat"])
self._target_frame_names.append(frame)
duplicate_frame_indices.append(i)
# To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes
# [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is an optimization to make _update_buffer_impl more efficient
duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device)
num_target_body_frames = len(tracked_body_names) - 1
self._duplicate_frame_indices = torch.cat(
[duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)]
)
# Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4)
self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1)
self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1)
# fill the data buffer
self._data.target_frame_names = self._target_frame_names
self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device)
self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device)
self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device)
self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device)
self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w)
self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w)
def _update_buffers_impl(self, env_ids: Sequence[int]):
"""Fills the buffers of the sensor data."""
# default to all sensors
if len(env_ids) == self._num_envs:
env_ids = ...
# Extract transforms from view - shape is:
# (the total number of source and target body frames being tracked * self._num_envs, 7)
transforms = self._frame_physx_view.get_transforms()
# Convert quaternions as PhysX uses xyzw form
transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz")
# Process source frame transform
source_frames = transforms[self._source_frame_body_ids]
# Only apply offset if the offsets will result in a coordinate frame transform
if self._apply_source_frame_offset:
source_pos_w, source_quat_w = combine_frame_transforms(
source_frames[:, :3],
source_frames[:, 3:],
self._source_frame_offset_pos,
self._source_frame_offset_quat,
)
else:
source_pos_w = source_frames[:, :3]
source_quat_w = source_frames[:, 3:]
# Process target frame transforms
target_frames = transforms[self._target_frame_body_ids]
duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3]
duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:]
# Only apply offset if the offsets will result in a coordinate frame transform
if self._apply_target_frame_offset:
target_pos_w, target_quat_w = combine_frame_transforms(
duplicated_target_frame_pos_w,
duplicated_target_frame_quat_w,
self._target_frame_offset_pos,
self._target_frame_offset_quat,
)
else:
target_pos_w = duplicated_target_frame_pos_w
target_quat_w = duplicated_target_frame_quat_w
# Compute the transform of the target frame with respect to the source frame
total_num_frames = len(self._target_frame_names)
target_pos_source, target_quat_source = subtract_frame_transforms(
source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3),
source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4),
target_pos_w,
target_quat_w,
)
# Update buffers
# note: The frame names / ordering don't change so no need to update them after initialization
self._data.source_pos_w[:] = source_pos_w.view(-1, 3)
self._data.source_quat_w[:] = source_quat_w.view(-1, 4)
self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3)
self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4)
self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3)
self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4)
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
if not hasattr(self, "frame_visualizer"):
self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
# set their visibility to true
self.frame_visualizer.set_visibility(True)
else:
if hasattr(self, "frame_visualizer"):
self.frame_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# Update the visualized markers
if self.frame_visualizer is not None:
self.frame_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_quat_w.view(-1, 4))
"""
Internal simulation callbacks.
"""
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
# call parent
super()._invalidate_initialize_callback(event)
# set all existing views to None to invalidate them
self._physics_sim_view = None
self._frame_physx_view = None
| 18,936 | Python | 50.181081 | 118 | 0.647972 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg
from omni.isaac.orbit.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .frame_transformer import FrameTransformer
@configclass
class OffsetCfg:
"""The offset pose of one frame relative to another frame."""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
@configclass
class FrameTransformerCfg(SensorBaseCfg):
"""Configuration for the frame transformer sensor."""
@configclass
class FrameCfg:
"""Information specific to a coordinate frame."""
prim_path: str = MISSING
"""The prim path corresponding to the parent rigid body.
This prim should be part of the same articulation as :attr:`FrameTransformerCfg.prim_path`.
"""
name: str | None = None
"""User-defined name for the new coordinate frame. Defaults to None.
If None, then the name is extracted from the leaf of the prim path.
"""
offset: OffsetCfg = OffsetCfg()
"""The pose offset from the parent prim frame."""
class_type: type = FrameTransformer
prim_path: str = MISSING
"""The prim path of the body to transform from (source frame)."""
source_frame_offset: OffsetCfg = OffsetCfg()
"""The pose offset from the source prim frame."""
target_frames: list[FrameCfg] = MISSING
"""A list of the target frames.
This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped,
we can use a single FrameTransformer to track each foot's position and orientation in the body
frame using four frame offsets.
"""
visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer")
"""The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG.
Note:
This attribute is only used when debug visualization is enabled.
"""
| 2,369 | Python | 33.347826 | 109 | 0.688898 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for frame transformer sensor."""
from .frame_transformer import FrameTransformer
from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg
from .frame_transformer_data import FrameTransformerData
| 342 | Python | 30.181815 | 65 | 0.80117 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import warnings
from dataclasses import dataclass
@dataclass
class FrameTransformerData:
"""Data container for the frame transformer sensor."""
target_frame_names: list[str] = None
"""Target frame names (this denotes the order in which that frame data is ordered).
The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field.
This usually follows the order in which the frames are defined in the config. However, in
the case of regex matching, the order may be different.
"""
target_pos_source: torch.Tensor = None
"""Position of the target frame(s) relative to source frame.
Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames.
"""
target_quat_source: torch.Tensor = None
"""Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z).
Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames.
"""
target_pos_w: torch.Tensor = None
"""Position of the target frame(s) after offset (in world frame).
Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames.
"""
target_quat_w: torch.Tensor = None
"""Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z).
Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames.
"""
source_pos_w: torch.Tensor = None
"""Position of the source frame after offset (in world frame).
Shape is (N, 3), where N is the number of environments.
"""
source_quat_w: torch.Tensor = None
"""Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z).
Shape is (N, 4), where N is the number of environments.
"""
@property
def target_rot_source(self) -> torch.Tensor:
"""Alias for :attr:`target_quat_source`.
.. deprecated:: v0.2.1
Use :attr:`target_quat_source` instead. Will be removed in v0.3.0.
"""
warnings.warn("'target_rot_source' is deprecated, use 'target_quat_source' instead.", DeprecationWarning)
return self.target_quat_source
@property
def target_rot_w(self) -> torch.Tensor:
"""Alias for :attr:`target_quat_w`.
.. deprecated:: v0.2.1
Use :attr:`target_quat_w` instead. Will be removed in v0.3.0.
"""
warnings.warn("'target_rot_w' is deprecated, use 'target_quat_w' instead.", DeprecationWarning)
return self.target_quat_w
@property
def source_rot_w(self) -> torch.Tensor:
"""Alias for :attr:`source_quat_w`.
.. deprecated:: v0.2.1
Use :attr:`source_quat_w` instead. Will be removed in v0.3.0.
"""
warnings.warn("'source_rot_w' is deprecated, use 'source_quat_w' instead.", DeprecationWarning)
return self.source_quat_w
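# Example (illustrative, hedged): with a hypothetical, initialized
# `FrameTransformer` named `ee_tracker` that has a single target frame, the
# end-effector pose expressed in the source (base) frame could be read as:
#
#   ee_pos_b = ee_tracker.data.target_pos_source[:, 0]    # shape (N, 3)
#   ee_quat_b = ee_tracker.data.target_quat_source[:, 0]  # shape (N, 4)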
| 3,087 | Python | 34.090909 | 113 | 0.653709 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import math
import numpy as np
import re
import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, Any, Literal
import carb
import omni.kit.commands
import omni.usd
from omni.isaac.core.prims import XFormPrimView
from pxr import UsdGeom
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.utils import to_camel_case
from omni.isaac.orbit.utils.array import convert_to_torch
from omni.isaac.orbit.utils.math import quat_from_matrix
from ..sensor_base import SensorBase
from .camera_data import CameraData
from .utils import convert_orientation_convention, create_rotation_matrix_from_view
if TYPE_CHECKING:
from .camera_cfg import CameraCfg
class Camera(SensorBase):
r"""The camera sensor for acquiring visual data.
This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data.
It ensures that the camera follows the ROS convention for the coordinate system.
Summarizing from the `replicator extension`_, the following sensor types are supported:
- ``"rgb"``: A rendered color image.
- ``"distance_to_camera"``: An image containing the distance to camera optical center.
- ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis.
- ``"normals"``: An image containing the local surface normal vectors at each pixel.
- ``"motion_vectors"``: An image containing the motion vector data at each pixel.
- ``"semantic_segmentation"``: The semantic segmentation data.
- ``"instance_segmentation_fast"``: The instance segmentation data.
- ``"instance_id_segmentation_fast"``: The instance id segmentation data.
.. note::
Currently the following sensor types are not supported in a "view" format:
- ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead.
- ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead.
- ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions).
- ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions).
- ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions).
- ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions).
- ``"bounding_box_3d"``: The 3D view space bounding box data.
- ``"bounding_box_3d_fast"``: The 3D view space bounding box data.
.. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output
.. _UsdGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html
"""
cfg: CameraCfg
"""The configuration parameters."""
UNSUPPORTED_TYPES: set[str] = {
"instance_id_segmentation",
"instance_segmentation",
"bounding_box_2d_tight",
"bounding_box_2d_loose",
"bounding_box_3d",
"bounding_box_2d_tight_fast",
"bounding_box_2d_loose_fast",
"bounding_box_3d_fast",
}
"""The set of sensor types that are not supported by the camera class."""
def __init__(self, cfg: CameraCfg):
"""Initializes the camera sensor.
Args:
cfg: The configuration parameters.
Raises:
RuntimeError: If no camera prim is found at the given path.
ValueError: If the provided data types are not supported by the camera.
"""
# check if sensor path is valid
# note: currently we do not handle environment indices if there is a regex pattern in the leaf
# For example, if the prim path is "/World/Sensor_[1,2]".
sensor_path = cfg.prim_path.split("/")[-1]
sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None
if sensor_path_is_regex:
raise RuntimeError(
f"Invalid prim path for the camera sensor: {self.cfg.prim_path}."
"\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf."
)
# perform check on supported data types
self._check_supported_data_types(cfg)
# initialize base class
super().__init__(cfg)
# toggle rendering of rtx sensors as True
# this flag is read by SimulationContext to determine if rtx sensors should be rendered
carb_settings_iface = carb.settings.get_settings()
carb_settings_iface.set_bool("/orbit/render/rtx_sensors", True)
# spawn the asset
if self.cfg.spawn is not None:
# compute the rotation offset
rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32).unsqueeze(0)
rot_offset = convert_orientation_convention(rot, origin=self.cfg.offset.convention, target="opengl")
rot_offset = rot_offset.squeeze(0).numpy()
# spawn the asset
self.cfg.spawn.func(
self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset
)
# check that spawn was successful
matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path)
if len(matching_prims) == 0:
raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.")
# UsdGeom Camera prim for the sensor
self._sensor_prims: list[UsdGeom.Camera] = list()
# Create empty variables for storing output data
self._data = CameraData()
def __del__(self):
"""Unsubscribes from callbacks and detach from the replicator registry."""
# unsubscribe callbacks
super().__del__()
# delete from replicator registry
for _, annotators in self._rep_registry.items():
for annotator, render_product_path in zip(annotators, self._render_product_paths):
annotator.detach([render_product_path])
annotator = None
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
# message for class
return (
f"Camera @ '{self.cfg.prim_path}': \n"
f"\tdata types : {self.data.output.sorted_keys} \n"
f"\tsemantic filter : {self.cfg.semantic_filter}\n"
f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n"
f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n"
f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n"
f"\tupdate period (s): {self.cfg.update_period}\n"
f"\tshape : {self.image_shape}\n"
f"\tnumber of sensors : {self._view.count}"
)
"""
Properties
"""
@property
def num_instances(self) -> int:
return self._view.count
@property
def data(self) -> CameraData:
# update sensors if needed
self._update_outdated_buffers()
# return the data
return self._data
@property
def frame(self) -> torch.tensor:
"""Frame number when the measurement took place."""
return self._frame
@property
def render_product_paths(self) -> list[str]:
"""The path of the render products for the cameras.
This can be used via replicator interfaces to attach to writes or external annotator registry.
"""
return self._render_product_paths
@property
def image_shape(self) -> tuple[int, int]:
"""A tuple containing (height, width) of the camera sensor."""
return (self.cfg.height, self.cfg.width)
"""
Configuration
"""
def set_intrinsic_matrices(
self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None
):
"""Set parameters of the USD camera from its intrinsic matrix.
The intrinsic matrix and focal length are used to set the following parameters to the USD camera:
- ``focal_length``: The focal length of the camera.
- ``horizontal_aperture``: The horizontal aperture of the camera.
- ``vertical_aperture``: The vertical aperture of the camera.
- ``horizontal_aperture_offset``: The horizontal offset of the camera.
- ``vertical_aperture_offset``: The vertical offset of the camera.
.. warning::
            Due to limitations of the Omniverse camera, we need to assume that the camera is a spherical lens,
            i.e. has square pixels, and the optical center is centered at the camera eye. If this assumption
            is not true in the input intrinsic matrix, then the camera will not be set up correctly.
Args:
matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3).
focal_length: Focal length to use when computing aperture values. Defaults to 1.0.
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
        # convert matrices to numpy arrays
if isinstance(matrices, torch.Tensor):
matrices = matrices.cpu().numpy()
else:
matrices = np.asarray(matrices, dtype=float)
# iterate over env_ids
for i, intrinsic_matrix in zip(env_ids, matrices):
# extract parameters from matrix
f_x = intrinsic_matrix[0, 0]
c_x = intrinsic_matrix[0, 2]
f_y = intrinsic_matrix[1, 1]
c_y = intrinsic_matrix[1, 2]
# get viewport parameters
height, width = self.image_shape
height, width = float(height), float(width)
# resolve parameters for usd camera
params = {
"focal_length": focal_length,
"horizontal_aperture": width * focal_length / f_x,
"vertical_aperture": height * focal_length / f_y,
"horizontal_aperture_offset": (c_x - width / 2) / f_x,
"vertical_aperture_offset": (c_y - height / 2) / f_y,
}
# change data for corresponding camera index
sensor_prim = self._sensor_prims[i]
# set parameters for camera
for param_name, param_value in params.items():
# convert to camel case (CC)
param_name = to_camel_case(param_name, to="CC")
# get attribute from the class
param_attr = getattr(sensor_prim, f"Get{param_name}Attr")
# set value
# note: We have to do it this way because the camera might be on a different
# layer (default cameras are on session layer), and this is the simplest
# way to set the property on the right layer.
omni.usd.set_prop_val(param_attr(), param_value)
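    # Usage sketch (illustrative only; the calibration values below are assumptions):
    # given results (f_x, f_y, c_x, c_y) = (600, 600, 320, 240) for a 640x480 sensor,
    # the intrinsics of all camera instances could be overridden as:
    #
    #   K = torch.tensor([[600.0, 0.0, 320.0],
    #                     [0.0, 600.0, 240.0],
    #                     [0.0, 0.0, 1.0]]).repeat(camera.num_instances, 1, 1)
    #   camera.set_intrinsic_matrices(K, focal_length=24.0)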
"""
Operations - Set pose.
"""
def set_world_poses(
self,
positions: torch.Tensor | None = None,
orientations: torch.Tensor | None = None,
env_ids: Sequence[int] | None = None,
convention: Literal["opengl", "ros", "world"] = "ros",
):
r"""Set the pose of the camera w.r.t. the world frame using specified convention.
Since different fields use different conventions for camera orientations, the method allows users to
set the camera poses in the specified convention. Possible conventions are:
- :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
- :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
- :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention
See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details
on the conventions.
Args:
            positions: The cartesian coordinates (in meters). Shape is (N, 3).
                Defaults to None, in which case the camera position is not changed.
            orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
                Defaults to None, in which case the camera orientation is not changed.
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
            convention: The convention in which the poses are fed. Defaults to "ros".
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# convert to backend tensor
if positions is not None:
if isinstance(positions, np.ndarray):
positions = torch.from_numpy(positions).to(device=self._device)
elif not isinstance(positions, torch.Tensor):
positions = torch.tensor(positions, device=self._device)
# convert rotation matrix from input convention to OpenGL
if orientations is not None:
if isinstance(orientations, np.ndarray):
orientations = torch.from_numpy(orientations).to(device=self._device)
elif not isinstance(orientations, torch.Tensor):
orientations = torch.tensor(orientations, device=self._device)
orientations = convert_orientation_convention(orientations, origin=convention, target="opengl")
# set the pose
self._view.set_world_poses(positions, orientations, env_ids)
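    # Usage sketch (illustrative only; the pose values are assumptions): place two
    # cameras at fixed world poses, feeding quaternions in the ROS convention:
    #
    #   positions = torch.tensor([[2.0, 0.0, 1.0], [-2.0, 0.0, 1.0]])
    #   orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    #   camera.set_world_poses(positions, orientations, convention="ros")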
def set_world_poses_from_view(
self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
):
"""Set the poses of the camera from the eye position and look-at target position.
Args:
eyes: The positions of the camera's eye. Shape is (N, 3).
targets: The target locations to look at. Shape is (N, 3).
            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
NotImplementedError: If the stage up-axis is not "Y" or "Z".
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# set camera poses using the view
orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device))
self._view.set_world_poses(eyes, orientations, env_ids)
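    # Usage sketch (illustrative only; the eye position is an assumption): aim every
    # camera instance at the world origin from the same viewpoint:
    #
    #   eyes = torch.tensor([[2.0, 2.0, 2.0]]).repeat(camera.num_instances, 1)
    #   targets = torch.zeros_like(eyes)
    #   camera.set_world_poses_from_view(eyes, targets)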
"""
Operations
"""
def reset(self, env_ids: Sequence[int] | None = None):
# reset the timestamps
super().reset(env_ids)
# resolve None
# note: cannot do smart indexing here since we do a for loop over data.
if env_ids is None:
env_ids = self._ALL_INDICES
# reset the data
# note: this recomputation is useful if one performs events such as randomizations on the camera poses.
self._update_poses(env_ids)
self._update_intrinsic_matrices(env_ids)
# Reset the frame count
self._frame[env_ids] = 0
"""
Implementation.
"""
def _initialize_impl(self):
"""Initializes the sensor handles and internal buffers.
This function creates handles and registers the provided data types with the replicator registry to
be able to access the data from the sensor. It also initializes the internal buffers to store the data.
Raises:
RuntimeError: If the number of camera prims in the view does not match the number of environments.
"""
import omni.replicator.core as rep
from omni.syntheticdata.scripts.SyntheticData import SyntheticData
# Initialize parent class
super()._initialize_impl()
# Create a view for the sensor
self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False)
self._view.initialize()
# Check that sizes are correct
if self._view.count != self._num_envs:
raise RuntimeError(
f"Number of camera prims in the view ({self._view.count}) does not match"
f" the number of environments ({self._num_envs})."
)
# Create all env_ids buffer
self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long)
# Create frame count buffer
self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long)
# Attach the sensor data types to render node
self._render_product_paths: list[str] = list()
self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types}
# Obtain current stage
stage = omni.usd.get_context().get_stage()
# Convert all encapsulated prims to Camera
for cam_prim_path in self._view.prim_paths:
# Get camera prim
cam_prim = stage.GetPrimAtPath(cam_prim_path)
# Check if prim is a camera
if not cam_prim.IsA(UsdGeom.Camera):
raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.")
# Add to list
sensor_prim = UsdGeom.Camera(cam_prim)
self._sensor_prims.append(sensor_prim)
# Get render product
# From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path
render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height))
if not isinstance(render_prod_path, str):
render_prod_path = render_prod_path.path
self._render_product_paths.append(render_prod_path)
# Check if semantic types or semantic filter predicate is provided
if isinstance(self.cfg.semantic_filter, list):
semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*"
elif isinstance(self.cfg.semantic_filter, str):
semantic_filter_predicate = self.cfg.semantic_filter
else:
raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.")
# set the semantic filter predicate
# copied from rep.scripts.writes_default.basic_writer.py
SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate)
# Iterate over each data type and create annotator
# TODO: This will move out of the loop once Replicator supports multiple render products within a single
# annotator, i.e.: rep_annotator.attach(self._render_product_paths)
for name in self.cfg.data_types:
# note: we are verbose here to make it easier to understand the code.
# if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned.
# if colorize is false, the data is returned as a uint32 image with ids as values.
if name == "semantic_segmentation":
init_params = {"colorize": self.cfg.colorize_semantic_segmentation}
elif name == "instance_segmentation_fast":
init_params = {"colorize": self.cfg.colorize_instance_segmentation}
elif name == "instance_id_segmentation_fast":
init_params = {"colorize": self.cfg.colorize_instance_id_segmentation}
else:
init_params = None
# Resolve device name
if "cuda" in self._device:
device_name = self._device.split(":")[0]
else:
device_name = "cpu"
# create annotator node
rep_annotator = rep.AnnotatorRegistry.get_annotator(name, init_params, device=device_name)
rep_annotator.attach(render_prod_path)
# add to registry
self._rep_registry[name].append(rep_annotator)
# Create internal buffers
self._create_buffers()
def _update_buffers_impl(self, env_ids: Sequence[int]):
# Increment frame count
self._frame[env_ids] += 1
# -- intrinsic matrix
self._update_intrinsic_matrices(env_ids)
# -- pose
self._update_poses(env_ids)
# -- read the data from annotator registry
# check if buffer is called for the first time. If so then, allocate the memory
if len(self._data.output.sorted_keys) == 0:
# this is the first time buffer is called
# it allocates memory for all the sensors
self._create_annotator_data()
else:
# iterate over all the data types
for name, annotators in self._rep_registry.items():
# iterate over all the annotators
for index in env_ids:
# get the output
output = annotators[index].get_data()
# process the output
data, info = self._process_annotator_output(name, output)
# add data to output
self._data.output[name][index] = data
# add info to output
self._data.info[index][name] = info
"""
Private Helpers
"""
def _check_supported_data_types(self, cfg: CameraCfg):
"""Checks if the data types are supported by the ray-caster camera."""
# check if there is any intersection in unsupported types
# reason: these use np structured data types which we can't yet convert to torch tensor
common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES
if common_elements:
# provide alternative fast counterparts
fast_common_elements = []
for item in common_elements:
if "instance_segmentation" in item or "instance_id_segmentation" in item:
fast_common_elements.append(item + "_fast")
# raise error
raise ValueError(
f"Camera class does not support the following sensor types: {common_elements}."
"\n\tThis is because these sensor types output numpy structured data types which"
"can't be converted to torch tensors easily."
"\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts."
f"\n\t\tFast counterparts: {fast_common_elements}"
)
def _create_buffers(self):
"""Create buffers for storing data."""
# create the data object
# -- pose of the cameras
self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device)
self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device)
# -- intrinsic matrix
self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device)
self._data.image_shape = self.image_shape
# -- output data
# lazy allocation of data dictionary
# since the size of the output data is not known in advance, we leave it as None
# the memory will be allocated when the buffer() function is called for the first time.
self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device)
self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)]
def _update_intrinsic_matrices(self, env_ids: Sequence[int]):
"""Compute camera's matrix of intrinsic parameters.
Also called calibration matrix. This matrix works for linear depth images. We assume square pixels.
Note:
The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera.
The coordinates of points on the image plane are in the homogeneous representation.
"""
# iterate over all cameras
for i in env_ids:
# Get corresponding sensor prim
sensor_prim = self._sensor_prims[i]
# get camera parameters
focal_length = sensor_prim.GetFocalLengthAttr().Get()
horiz_aperture = sensor_prim.GetHorizontalApertureAttr().Get()
# get viewport parameters
height, width = self.image_shape
# calculate the field of view
fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
# calculate the focal length in pixels
focal_px = width * 0.5 / math.tan(fov / 2)
# create intrinsic matrix for depth linear
self._data.intrinsic_matrices[i, 0, 0] = focal_px
self._data.intrinsic_matrices[i, 0, 2] = width * 0.5
self._data.intrinsic_matrices[i, 1, 1] = focal_px
self._data.intrinsic_matrices[i, 1, 2] = height * 0.5
self._data.intrinsic_matrices[i, 2, 2] = 1
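        # note: the assembled matrix has the standard pinhole structure
        #   K = [[f_px,    0, W/2],
        #        [   0, f_px, H/2],
        #        [   0,    0,   1]]
        # i.e. square pixels (f_x == f_y) and the principal point at the image center.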
def _update_poses(self, env_ids: Sequence[int]):
"""Computes the pose of the camera in the world frame with ROS convention.
        This method uses the ROS convention to resolve the input pose. In this convention,
        we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis.
        The resolved position (in meters) and quaternion (w, x, y, z) are written into
        the sensor's data buffers.
"""
# check camera prim exists
if len(self._sensor_prims) == 0:
raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.")
# get the poses from the view
poses, quat = self._view.get_world_poses(env_ids)
self._data.pos_w[env_ids] = poses
self._data.quat_w_world[env_ids] = convert_orientation_convention(quat, origin="opengl", target="world")
def _create_annotator_data(self):
"""Create the buffers to store the annotator data.
We create a buffer for each annotator and store the data in a dictionary. Since the data
shape is not known beforehand, we create a list of buffers and concatenate them later.
This is an expensive operation and should be called only once.
"""
# add data from the annotators
for name, annotators in self._rep_registry.items():
# create a list to store the data for each annotator
data_all_cameras = list()
# iterate over all the annotators
for index in self._ALL_INDICES:
# get the output
output = annotators[index].get_data()
# process the output
data, info = self._process_annotator_output(name, output)
# append the data
data_all_cameras.append(data)
# store the info
self._data.info[index][name] = info
# concatenate the data along the batch dimension
self._data.output[name] = torch.stack(data_all_cameras, dim=0)
    def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.Tensor, dict | None]:
"""Process the annotator output.
This function is called after the data has been collected from all the cameras.
"""
# extract info and data from the output
if isinstance(output, dict):
data = output["data"]
info = output["info"]
else:
data = output
info = None
# convert data into torch tensor
data = convert_to_torch(data, device=self.device)
# process data for different segmentation types
# Note: Replicator returns raw buffers of dtype int32 for segmentation types
# so we need to convert them to uint8 4 channel images for colorized types
height, width = self.image_shape
if name == "semantic_segmentation":
if self.cfg.colorize_semantic_segmentation:
data = data.view(torch.uint8).reshape(height, width, -1)
else:
data = data.view(height, width)
elif name == "instance_segmentation_fast":
if self.cfg.colorize_instance_segmentation:
data = data.view(torch.uint8).reshape(height, width, -1)
else:
data = data.view(height, width)
elif name == "instance_id_segmentation_fast":
if self.cfg.colorize_instance_id_segmentation:
data = data.view(torch.uint8).reshape(height, width, -1)
else:
data = data.view(height, width)
# return the data and info
return data, info
"""
Internal simulation callbacks.
"""
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
# call parent
super()._invalidate_initialize_callback(event)
# set all existing views to None to invalidate them
self._view = None
| 29,391 | Python | 45.068965 | 137 | 0.617536 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_data.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from dataclasses import dataclass
from tensordict import TensorDict
from typing import Any
from .utils import convert_orientation_convention
@dataclass
class CameraData:
"""Data container for the camera sensor."""
##
# Frame state.
##
pos_w: torch.Tensor = None
"""Position of the sensor origin in world frame, following ROS convention.
Shape is (N, 3) where N is the number of sensors.
"""
quat_w_world: torch.Tensor = None
"""Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame
.. note::
World frame convention follows the camera aligned with forward axis +X and up axis +Z.
Shape is (N, 4) where N is the number of sensors.
"""
##
# Camera data
##
image_shape: tuple[int, int] = None
"""A tuple containing (height, width) of the camera sensor."""
intrinsic_matrices: torch.Tensor = None
"""The intrinsic matrices for the camera.
Shape is (N, 3, 3) where N is the number of sensors.
"""
output: TensorDict = None
"""The retrieved sensor data with sensor types as key.
The format of the data is available in the `Replicator Documentation`_. For semantic-based data,
this corresponds to the ``"data"`` key in the output of the sensor.
.. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output
"""
info: list[dict[str, Any]] = None
"""The retrieved sensor info with sensor types as key.
This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths.
For semantic-based data, this corresponds to the ``"info"`` key in the output of the sensor. For other sensor
types, the info is empty.
"""
##
# Additional Frame orientation conventions
##
@property
def quat_w_ros(self) -> torch.Tensor:
"""Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention.
.. note::
ROS convention follows the camera aligned with forward axis +Z and up axis -Y.
Shape is (N, 4) where N is the number of sensors.
"""
return convert_orientation_convention(self.quat_w_world, origin="world", target="ros")
@property
def quat_w_opengl(self) -> torch.Tensor:
"""Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following
Opengl / USD Camera convention.
.. note::
OpenGL convention follows the camera aligned with forward axis -Z and up axis +Y.
Shape is (N, 4) where N is the number of sensors.
"""
return convert_orientation_convention(self.quat_w_world, origin="world", target="opengl")
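    # Usage sketch (illustrative only): pick the convention expected by the consumer,
    # e.g. ROS-convention orientations when publishing camera transforms:
    #
    #   pos, quat = camera.data.pos_w, camera.data.quat_w_ros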
| 2,983 | Python | 31.086021 | 155 | 0.66946 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for camera wrapper around USD camera prim."""
from .camera import Camera
from .camera_cfg import CameraCfg
from .camera_data import CameraData
from .utils import * # noqa: F401, F403
| 322 | Python | 25.916665 | 59 | 0.751553 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/utils.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Helper functions to project between pointcloud and depth images."""
# needed to import for allowing type-hinting: torch.device | str | None
from __future__ import annotations
import math
import numpy as np
import torch
import torch.nn.functional as F
from collections.abc import Sequence
from typing import Literal
import omni.isaac.core.utils.stage as stage_utils
import warp as wp
from pxr import UsdGeom
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.utils.array import TensorData, convert_to_torch
"""
Depth <-> Pointcloud conversions.
"""
def transform_points(
points: TensorData,
position: Sequence[float] | None = None,
orientation: Sequence[float] | None = None,
device: torch.device | str | None = None,
) -> np.ndarray | torch.Tensor:
r"""Transform input points in a given frame to a target frame.
    This function transforms points from a source frame to a target frame. The transformation is defined by the
    position ``t`` and orientation ``R`` of the source frame in the target frame.
.. math::
p_{target} = R_{target} \times p_{source} + t_{target}
If either the inputs `position` and `orientation` are None, the corresponding transformation is not applied.
Args:
        points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in the source frame.
        position: The position of the source frame in the target frame. Defaults to None.
        orientation: The orientation (w, x, y, z) of the source frame in the target frame.
            Defaults to None.
        device: The device for torch where the computation
            should be executed. Defaults to None, i.e. takes the device that matches the input points.
Returns:
        A tensor of shape (p, 3) or (n, p, 3) comprising of 3D points in the target frame.
        If the input is a numpy array, the output is a numpy array. Otherwise, it is a torch tensor.
"""
# check if numpy
is_numpy = isinstance(points, np.ndarray)
# decide device
if device is None and is_numpy:
device = torch.device("cpu")
# convert to torch
points = convert_to_torch(points, dtype=torch.float32, device=device)
    # update the device with the device of the input points
    # note: this is needed since warp does not provide the device directly
device = points.device
# apply rotation
if orientation is not None:
orientation = convert_to_torch(orientation, dtype=torch.float32, device=device)
# apply translation
if position is not None:
position = convert_to_torch(position, dtype=torch.float32, device=device)
# apply transformation
points = math_utils.transform_points(points, position, orientation)
# return everything according to input type
if is_numpy:
return points.detach().cpu().numpy()
else:
return points
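# Example (illustrative only; the pose values are assumptions): express camera-frame
# points in the world frame, given the camera position and quaternion (w, x, y, z):
#
#   points_cam = torch.rand(100, 3)
#   points_world = transform_points(
#       points_cam, position=(1.0, 0.0, 0.5), orientation=(1.0, 0.0, 0.0, 0.0)
#   )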
def create_pointcloud_from_depth(
intrinsic_matrix: np.ndarray | torch.Tensor | wp.array,
depth: np.ndarray | torch.Tensor | wp.array,
keep_invalid: bool = False,
position: Sequence[float] | None = None,
orientation: Sequence[float] | None = None,
device: torch.device | str | None = None,
) -> np.ndarray | torch.Tensor:
r"""Creates pointcloud from input depth image and camera intrinsic matrix.
This function creates a pointcloud from a depth image and camera intrinsic matrix. The pointcloud is
computed using the following equation:
.. math::
p_{camera} = K^{-1} \times [u, v, 1]^T \times d
where :math:`K` is the camera intrinsic matrix, :math:`u` and :math:`v` are the pixel coordinates and
:math:`d` is the depth value at the pixel.
Additionally, the pointcloud can be transformed from the camera frame to a target frame by providing
the position ``t`` and orientation ``R`` of the camera in the target frame:
.. math::
p_{target} = R_{target} \times p_{camera} + t_{target}
Args:
intrinsic_matrix: A (3, 3) array providing camera's calibration matrix.
depth: An array of shape (H, W) with values encoding the depth measurement.
keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points
correspond to pixels with depth values 0.0 or NaN. Defaults to False.
position: The position of the camera in a target frame. Defaults to None.
orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None.
device: The device for torch where the computation should be executed.
Defaults to None, i.e. takes the device that matches the depth image.
Returns:
An array/tensor of shape (N, 3) comprising of 3D coordinates of points.
The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray
is returned.
"""
# We use PyTorch here for matrix multiplication since it is compiled with Intel MKL while numpy
# by default uses OpenBLAS. With PyTorch (CPU), we could process a depth image of size (480, 640)
# in 0.0051 secs, while with numpy it took 0.0292 secs.
    # check if input depth is a numpy array
is_numpy = isinstance(depth, np.ndarray)
# decide device
if device is None and is_numpy:
device = torch.device("cpu")
# convert depth to torch tensor
depth = convert_to_torch(depth, dtype=torch.float32, device=device)
# update the device with the device of the depth image
# note: this is needed since warp does not provide the device directly
device = depth.device
# convert inputs to torch tensors
intrinsic_matrix = convert_to_torch(intrinsic_matrix, dtype=torch.float32, device=device)
if position is not None:
position = convert_to_torch(position, dtype=torch.float32, device=device)
if orientation is not None:
orientation = convert_to_torch(orientation, dtype=torch.float32, device=device)
# compute pointcloud
depth_cloud = math_utils.unproject_depth(depth, intrinsic_matrix)
# convert 3D points to world frame
depth_cloud = math_utils.transform_points(depth_cloud, position, orientation)
# keep only valid entries if flag is set
if not keep_invalid:
pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(depth_cloud), ~torch.isinf(depth_cloud)), dim=1)
depth_cloud = depth_cloud[pts_idx_to_keep, ...]
# return everything according to input type
if is_numpy:
return depth_cloud.detach().cpu().numpy()
else:
return depth_cloud
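# Example (illustrative only): unproject the first camera's depth output into a world-frame
# cloud, assuming the "distance_to_image_plane" annotator was enabled on the sensor:
#
#   points = create_pointcloud_from_depth(
#       intrinsic_matrix=camera.data.intrinsic_matrices[0],
#       depth=camera.data.output["distance_to_image_plane"][0],
#       position=camera.data.pos_w[0],
#       orientation=camera.data.quat_w_ros[0],
#   )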
def create_pointcloud_from_rgbd(
intrinsic_matrix: torch.Tensor | np.ndarray | wp.array,
depth: torch.Tensor | np.ndarray | wp.array,
rgb: torch.Tensor | wp.array | np.ndarray | tuple[float, float, float] = None,
normalize_rgb: bool = False,
position: Sequence[float] | None = None,
orientation: Sequence[float] | None = None,
device: torch.device | str | None = None,
num_channels: int = 3,
) -> tuple[torch.Tensor, torch.Tensor] | tuple[np.ndarray, np.ndarray]:
"""Creates pointcloud from input depth image and camera transformation matrix.
This function provides the same functionality as :meth:`create_pointcloud_from_depth` but also allows
to provide the RGB values for each point.
The ``rgb`` attribute is used to resolve the corresponding point's color:
- If a ``np.array``/``wp.array``/``torch.tensor`` of shape (H, W, 3), then the corresponding channels encode RGB values.
- If a tuple, then the point cloud has a single color specified by the values (r, g, b).
    - If None, then default color is black, i.e. (0, 0, 0).
If the input ``normalize_rgb`` is set to :obj:`True`, then the RGB values are normalized to be in the range [0, 1].
Args:
intrinsic_matrix: A (3, 3) array/tensor providing camera's calibration matrix.
depth: An array/tensor of shape (H, W) with values encoding the depth measurement.
rgb: Color for generated point cloud. Defaults to None.
normalize_rgb: Whether to normalize input rgb. Defaults to False.
position: The position of the camera in a target frame. Defaults to None.
orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None.
device: The device for torch where the computation should be executed. Defaults to None, in which case
it takes the device that matches the depth image.
num_channels: Number of channels in RGB pointcloud. Defaults to 3.
Returns:
A tuple of (N, 3) arrays or tensors containing the 3D coordinates of points and their RGB color respectively.
The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray
is returned.
Raises:
ValueError: When rgb image is a numpy array but not of shape (H, W, 3) or (H, W, 4).
"""
# check valid inputs
if rgb is not None and not isinstance(rgb, tuple):
if len(rgb.shape) == 3:
if rgb.shape[2] not in [3, 4]:
raise ValueError(f"Input rgb image of invalid shape: {rgb.shape} != (H, W, 3) or (H, W, 4).")
else:
raise ValueError(f"Input rgb image not three-dimensional. Received shape: {rgb.shape}.")
if num_channels not in [3, 4]:
raise ValueError(f"Invalid number of channels: {num_channels} != 3 or 4.")
# check if input depth is numpy array
is_numpy = isinstance(depth, np.ndarray)
# decide device
if device is None and is_numpy:
device = torch.device("cpu")
# convert depth to torch tensor
if is_numpy:
depth = torch.from_numpy(depth).to(device=device)
# retrieve XYZ pointcloud
points_xyz = create_pointcloud_from_depth(intrinsic_matrix, depth, True, position, orientation, device=device)
# get image height and width
im_height, im_width = depth.shape[:2]
# total number of points
num_points = im_height * im_width
# extract color value
if rgb is not None:
if isinstance(rgb, (np.ndarray, torch.Tensor, wp.array)):
# copy numpy array to preserve
rgb = convert_to_torch(rgb, device=device, dtype=torch.float32)
rgb = rgb[:, :, :3]
# convert the matrix to (W, H, 3) from (H, W, 3) since depth processing
            # is done in the order (u, v) where u: (0, W-1) and v: (0, H-1)
points_rgb = rgb.permute(1, 0, 2).reshape(-1, 3)
        elif isinstance(rgb, (tuple, list)):
            # same color for all points
            # note: use torch.tensor here; the legacy torch.Tensor constructor does not accept dtype/device kwargs
            points_rgb = torch.tensor((rgb,) * num_points, device=device, dtype=torch.uint8)
        else:
            # default color is black
            points_rgb = torch.tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8)
    else:
        points_rgb = torch.tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8)
# normalize color values
if normalize_rgb:
points_rgb = points_rgb.float() / 255
# remove invalid points
pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(points_xyz), ~torch.isinf(points_xyz)), dim=1)
points_rgb = points_rgb[pts_idx_to_keep, ...]
points_xyz = points_xyz[pts_idx_to_keep, ...]
# add additional channels if required
if num_channels == 4:
points_rgb = torch.nn.functional.pad(points_rgb, (0, 1), mode="constant", value=1.0)
# return everything according to input type
if is_numpy:
return points_xyz.cpu().numpy(), points_rgb.cpu().numpy()
else:
return points_xyz, points_rgb
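# Example (illustrative only): build a colored point cloud from paired RGB-D outputs of
# the first camera, assuming both "rgb" and "distance_to_image_plane" were enabled:
#
#   points_xyz, points_rgb = create_pointcloud_from_rgbd(
#       intrinsic_matrix=camera.data.intrinsic_matrices[0],
#       depth=camera.data.output["distance_to_image_plane"][0],
#       rgb=camera.data.output["rgb"][0],
#       normalize_rgb=True,
#   )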
def convert_orientation_convention(
orientation: torch.Tensor,
origin: Literal["opengl", "ros", "world"] = "opengl",
target: Literal["opengl", "ros", "world"] = "ros",
) -> torch.Tensor:
r"""Converts a quaternion representing a rotation from one convention to another.
In USD, the camera follows the ``"opengl"`` convention. Thus, it is always in **Y up** convention.
    This means that the camera is looking down the -Z axis with the +Y axis pointing up, and +X axis pointing right.
However, in ROS, the camera is looking down the +Z axis with the +Y axis pointing down, and +X axis pointing right.
Thus, the camera needs to be rotated by :math:`180^{\circ}` around the X axis to follow the ROS convention.
.. math::
T_{ROS} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}
On the other hand, the typical world coordinate system is with +X pointing forward, +Y pointing left,
and +Z pointing up. The camera can also be set in this convention by rotating the camera by :math:`90^{\circ}`
around the X axis and :math:`-90^{\circ}` around the Y axis.
.. math::
T_{WORLD} = \begin{bmatrix} 0 & 0 & -1 & 0 \\ -1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}
Thus, based on their application, cameras follow different conventions for their orientation. This function
converts a quaternion from one convention to another.
Possible conventions are:
- :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
- :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
- :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention
Args:
        orientation: Quaternion of form `(w, x, y, z)` with shape (..., 4) in source convention.
        origin: Convention to convert from. Defaults to "opengl".
        target: Convention to convert to. Defaults to "ros".
    Returns:
        Quaternion of form `(w, x, y, z)` with shape (..., 4) in target convention.
"""
if target == origin:
return orientation.clone()
# -- unify input type
if origin == "ros":
# convert from ros to opengl convention
rotm = math_utils.matrix_from_quat(orientation)
rotm[:, :, 2] = -rotm[:, :, 2]
rotm[:, :, 1] = -rotm[:, :, 1]
# convert to opengl convention
quat_gl = math_utils.quat_from_matrix(rotm)
elif origin == "world":
# convert from world (x forward and z up) to opengl convention
rotm = math_utils.matrix_from_quat(orientation)
rotm = torch.matmul(
rotm,
math_utils.matrix_from_euler(
torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ"
),
)
# convert to isaac-sim convention
quat_gl = math_utils.quat_from_matrix(rotm)
else:
quat_gl = orientation
# -- convert to target convention
if target == "ros":
# convert from opengl to ros convention
rotm = math_utils.matrix_from_quat(quat_gl)
rotm[:, :, 2] = -rotm[:, :, 2]
rotm[:, :, 1] = -rotm[:, :, 1]
return math_utils.quat_from_matrix(rotm)
elif target == "world":
# convert from opengl to world (x forward and z up) convention
rotm = math_utils.matrix_from_quat(quat_gl)
rotm = torch.matmul(
rotm,
math_utils.matrix_from_euler(
torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ"
).T,
)
return math_utils.quat_from_matrix(rotm)
else:
return quat_gl.clone()
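# Example (illustrative only): convert a world-convention quaternion into the OpenGL/USD
# convention used by the underlying Usd.Camera prim:
#
#   quat_world = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
#   quat_gl = convert_orientation_convention(quat_world, origin="world", target="opengl")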
# @torch.jit.script
def create_rotation_matrix_from_view(
eyes: torch.Tensor,
targets: torch.Tensor,
device: str = "cpu",
) -> torch.Tensor:
"""
    This function takes a vector ``eyes`` which specifies the location
    of the camera in world coordinates and the vector ``targets`` which
    indicates the position of the object.
    The output is a rotation matrix representing the transformation
    from world coordinates -> view coordinates.
    The inputs ``eyes`` and ``targets`` can each be a
- 3 element tuple/list
- torch tensor of shape (1, 3)
- torch tensor of shape (N, 3)
Args:
eyes: position of the camera in world coordinates
targets: position of the object in world coordinates
The vectors are broadcast against each other so they all have shape (N, 3).
Returns:
R: (N, 3, 3) batched rotation matrices
Reference:
Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/eaf0709d6af0025fe94d1ee7cec454bc3054826a/pytorch3d/renderer/cameras.py#L1635-L1685)
"""
up_axis_token = stage_utils.get_stage_up_axis()
if up_axis_token == UsdGeom.Tokens.y:
up_axis = torch.tensor((0, 1, 0), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1)
elif up_axis_token == UsdGeom.Tokens.z:
up_axis = torch.tensor((0, 0, 1), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1)
else:
raise ValueError(f"Invalid up axis: {up_axis_token}")
# get rotation matrix in opengl format (-Z forward, +Y up)
z_axis = -F.normalize(targets - eyes, eps=1e-5)
x_axis = F.normalize(torch.cross(up_axis, z_axis, dim=1), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5)
is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(dim=1, keepdim=True)
if is_close.any():
replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5)
x_axis = torch.where(is_close, replacement, x_axis)
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
return R.transpose(1, 2)
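# Example (illustrative only): build look-at orientations for two cameras and convert the
# rotation matrices to quaternions via the math utilities imported above:
#
#   eyes = torch.tensor([[2.0, 2.0, 2.0], [-2.0, 2.0, 2.0]])
#   targets = torch.zeros_like(eyes)
#   quats = math_utils.quat_from_matrix(create_rotation_matrix_from_view(eyes, targets))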
| 17,588 | Python | 42.9725 | 158 | 0.653969 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.sim import FisheyeCameraCfg, PinholeCameraCfg
from omni.isaac.orbit.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .camera import Camera
@configclass
class CameraCfg(SensorBaseCfg):
"""Configuration for a camera sensor."""
@configclass
class OffsetCfg:
"""The offset pose of the sensor's frame from the sensor's parent frame."""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
convention: Literal["opengl", "ros", "world"] = "ros"
"""The convention in which the frame offset is applied. Defaults to "ros".
- ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention.
- ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention.
- ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention.
"""
class_type: type = Camera
offset: OffsetCfg = OffsetCfg()
"""The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.
Note:
The parent frame is the frame the sensor attaches to. For example, the parent frame of a
camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``.
"""
spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING
"""Spawn configuration for the asset.
If None, then the prim is not spawned by the asset. Instead, it is assumed that the
asset is already present in the scene.
"""
data_types: list[str] = ["rgb"]
"""List of sensor names/types to enable for the camera. Defaults to ["rgb"].
Please refer to the :class:`Camera` class for a list of available data types.
"""
width: int = MISSING
"""Width of the image in pixels."""
height: int = MISSING
"""Height of the image in pixels."""
semantic_filter: str | list[str] = "*:*"
"""A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``.
If a string, it should be a disjunctive normal form of (semantic type, labels). For examples:
* ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``:
All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC".
Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE".
* ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA"
If a list of strings, each string should be a semantic type. The segmentation for prims with
semantics of the specified types will be retrieved. For example, if the list is ["class"], only
the segmentation for prims with semantics of type "class" will be retrieved.
.. seealso::
For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_.
.. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering
"""
colorize_semantic_segmentation: bool = True
"""Whether to colorize the semantic segmentation images. Defaults to True.
If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors
    and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array.
"""
colorize_instance_id_segmentation: bool = True
"""Whether to colorize the instance ID segmentation images. Defaults to True.
    If True, instance ID segmentation is converted to an image where instance IDs are mapped to colors
    and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array.
"""
colorize_instance_segmentation: bool = True
"""Whether to colorize the instance ID segmentation images. Defaults to True.
If True, instance segmentation is converted to an image where instance IDs are mapped to colors.
and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.
"""
| 4,623 | Python | 41.422018 | 159 | 0.673805 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Ignore optional memory usage warning globally
# pyright: reportOptionalSubscript=false
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import omni.physics.tensors.impl.api as physx
from pxr import PhysxSchema
import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.utils.math import convert_quat
from ..sensor_base import SensorBase
from .contact_sensor_data import ContactSensorData
if TYPE_CHECKING:
from .contact_sensor_cfg import ContactSensorCfg
class ContactSensor(SensorBase):
"""A contact reporting sensor.
The contact sensor reports the normal contact forces on a rigid body in the world frame.
It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies.
To enable the contact reporter on a rigid body, please make sure to enable the
:attr:`omni.isaac.orbit.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your
asset spawner configuration. This will enable the contact reporter on all the rigid bodies
in the asset.
The sensor can be configured to report the contact forces on a set of bodies with a given
filter pattern. Please check the documentation on `RigidContactView`_ for more details.
.. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html
.. _RigidContactView: https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html#omni.isaac.core.prims.RigidContactView
"""
cfg: ContactSensorCfg
"""The configuration parameters."""
def __init__(self, cfg: ContactSensorCfg):
"""Initializes the contact sensor object.
Args:
cfg: The configuration parameters.
"""
# initialize base class
super().__init__(cfg)
# Create empty variables for storing output data
self._data: ContactSensorData = ContactSensorData()
# initialize self._body_physx_view for running in extension mode
self._body_physx_view = None
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
return (
f"Contact sensor @ '{self.cfg.prim_path}': \n"
f"\tview type : {self.body_physx_view.__class__}\n"
f"\tupdate period (s) : {self.cfg.update_period}\n"
f"\tnumber of bodies : {self.num_bodies}\n"
f"\tbody names : {self.body_names}\n"
)
"""
Properties
"""
@property
def num_instances(self) -> int:
return self.body_physx_view.count
@property
def data(self) -> ContactSensorData:
# update sensors if needed
self._update_outdated_buffers()
# return the data
return self._data
@property
def num_bodies(self) -> int:
"""Number of bodies with contact sensors attached."""
return self._num_bodies
@property
def body_names(self) -> list[str]:
"""Ordered names of bodies with contact sensors attached."""
prim_paths = self.body_physx_view.prim_paths[: self.num_bodies]
return [path.split("/")[-1] for path in prim_paths]
@property
def body_physx_view(self) -> physx.RigidBodyView:
"""View for the rigid bodies captured (PhysX).
Note:
Use this view with caution. It requires handling of tensors in a specific way.
"""
return self._body_physx_view
@property
def contact_physx_view(self) -> physx.RigidContactView:
"""Contact reporter view for the bodies (PhysX).
Note:
Use this view with caution. It requires handling of tensors in a specific way.
"""
return self._contact_physx_view
"""
Operations
"""
def reset(self, env_ids: Sequence[int] | None = None):
# reset the timers and counters
super().reset(env_ids)
# resolve None
if env_ids is None:
env_ids = slice(None)
# reset accumulative data buffers
self._data.net_forces_w[env_ids] = 0.0
        self._data.net_forces_w_history[env_ids] = 0.0
# reset force matrix
if len(self.cfg.filter_prim_paths_expr) != 0:
self._data.force_matrix_w[env_ids] = 0.0
# reset the current air time
if self.cfg.track_air_time:
self._data.current_air_time[env_ids] = 0.0
self._data.last_air_time[env_ids] = 0.0
self._data.current_contact_time[env_ids] = 0.0
self._data.last_contact_time[env_ids] = 0.0
def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]:
"""Find bodies in the articulation based on the name keys.
Args:
name_keys: A regular expression or a list of regular expressions to match the body names.
preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False.
Returns:
A tuple of lists containing the body indices and names.
"""
return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order)
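    # Usage sketch (illustrative only; the body-name pattern is an assumption): resolve
    # the feet of a quadruped to index into the (N, B) contact buffers:
    #
    #   foot_ids, foot_names = contact_sensor.find_bodies(".*_FOOT")
    #   foot_forces = contact_sensor.data.net_forces_w[:, foot_ids]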
def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
"""Checks if bodies that have established contact within the last :attr:`dt` seconds.
This function checks if the bodies have established contact within the last :attr:`dt` seconds
by comparing the current contact time with the given time period. If the contact time is less
than the given time period, then the bodies are considered to be in contact.
Note:
The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other
words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true
if the sensor is updated by the physics or the environment stepping time-step and the sensor
is read by the environment stepping time-step.
Args:
dt: The time period since the contact was established.
abs_tol: The absolute tolerance for the comparison.
Returns:
A boolean tensor indicating the bodies that have established contact within the last
:attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the
number of bodies in each sensor.
Raises:
RuntimeError: If the sensor is not configured to track contact time.
"""
# check if the sensor is configured to track contact time
if not self.cfg.track_air_time:
raise RuntimeError(
"The contact sensor is not configured to track contact time."
"Please enable the 'track_air_time' in the sensor configuration."
)
# check if the bodies are in contact
currently_in_contact = self.data.current_contact_time > 0.0
less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol)
return currently_in_contact * less_than_dt_in_contact
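    # Usage sketch (illustrative only; `step_dt` is an assumed environment attribute):
    # detect touch-down events at the environment stepping rate, which requires
    # ContactSensorCfg.track_air_time to be enabled:
    #
    #   first_contact = contact_sensor.compute_first_contact(dt=env.step_dt)
    #   touched_down = first_contact.any(dim=-1)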
def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
"""Checks if bodies that have broken contact within the last :attr:`dt` seconds.
This function checks if the bodies have broken contact within the last :attr:`dt` seconds
by comparing the current air time with the given time period. If the air time is less
than the given time period, then the bodies are considered to not be in contact.
Note:
It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words,
:math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if
the sensor is updated by the physics or the environment stepping time-step and the sensor
is read by the environment stepping time-step.
Args:
            dt: The time period since the contact was broken.
abs_tol: The absolute tolerance for the comparison.
Returns:
A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds.
Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
Raises:
RuntimeError: If the sensor is not configured to track contact time.
"""
# check if the sensor is configured to track contact time
if not self.cfg.track_air_time:
raise RuntimeError(
"The contact sensor is not configured to track contact time."
"Please enable the 'track_air_time' in the sensor configuration."
)
        # check if the bodies have detached from contact
currently_detached = self.data.current_air_time > 0.0
less_than_dt_detached = self.data.current_air_time < (dt + abs_tol)
return currently_detached * less_than_dt_detached
"""
Implementation.
"""
def _initialize_impl(self):
super()._initialize_impl()
# create simulation view
self._physics_sim_view = physx.create_simulation_view(self._backend)
self._physics_sim_view.set_subspace_roots("/")
# check that only rigid bodies are selected
leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1]
template_prim_path = self._parent_prims[0].GetPath().pathString
body_names = list()
for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern):
# check if prim has contact reporter API
if prim.HasAPI(PhysxSchema.PhysxContactReportAPI):
prim_path = prim.GetPath().pathString
body_names.append(prim_path.rsplit("/", 1)[-1])
# check that there is at least one body with contact reporter API
if not body_names:
raise RuntimeError(
f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API."
"\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration."
)
# construct regex expression for the body names
body_names_regex = r"(" + "|".join(body_names) + r")"
body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}"
# convert regex expressions to glob expressions for PhysX
body_names_glob = body_names_regex.replace(".*", "*")
filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr]
# create a rigid prim view for the sensor
self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob)
self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view(
body_names_glob, filter_patterns=filter_prim_paths_glob
)
# resolve the true count of bodies
self._num_bodies = self.body_physx_view.count // self._num_envs
# check that contact reporter succeeded
if self._num_bodies != len(body_names):
raise RuntimeError(
"Failed to initialize contact reporter for specified bodies."
f"\n\tInput prim path : {self.cfg.prim_path}"
f"\n\tResolved prim paths: {body_names_regex}"
)
# prepare data buffers
self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device)
# optional buffers
# -- history of net forces
if self.cfg.history_length > 0:
self._data.net_forces_w_history = torch.zeros(
self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device
)
else:
self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1)
# -- pose of sensor origins
if self.cfg.track_pose:
self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device)
self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device)
# -- air/contact time between contacts
if self.cfg.track_air_time:
self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device)
self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device)
self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device)
self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device)
# force matrix: (num_envs, num_bodies, num_filter_shapes, 3)
if len(self.cfg.filter_prim_paths_expr) != 0:
num_filters = self.contact_physx_view.filter_count
self._data.force_matrix_w = torch.zeros(
self._num_envs, self._num_bodies, num_filters, 3, device=self._device
)
def _update_buffers_impl(self, env_ids: Sequence[int]):
"""Fills the buffers of the sensor data."""
# default to all sensors
if len(env_ids) == self._num_envs:
env_ids = slice(None)
# obtain the contact forces
# TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B).
# This isn't the most efficient way to do this, but it's the easiest to implement.
net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt)
self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids]
# update contact force history
if self.cfg.history_length > 0:
self._data.net_forces_w_history[env_ids, 1:] = self._data.net_forces_w_history[env_ids, :-1].clone()
self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids]
# obtain the contact force matrix
if len(self.cfg.filter_prim_paths_expr) != 0:
# shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3)
num_filters = self.contact_physx_view.filter_count
# acquire and shape the force matrix
force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt)
force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3)
self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids]
# obtain the pose of the sensor origin
if self.cfg.track_pose:
pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids]
pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz")
self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1)
# obtain the air time
if self.cfg.track_air_time:
# -- time elapsed since last update
# since this function is called every frame, we can use the difference to get the elapsed time
elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids]
# -- check contact state of bodies
is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold
is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact
is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact
# -- update the last contact time if body has just become in contact
self._data.last_air_time[env_ids] = torch.where(
is_first_contact,
self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1),
self._data.last_air_time[env_ids],
)
# -- increment time for bodies that are not in contact
self._data.current_air_time[env_ids] = torch.where(
~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0
)
# -- update the last contact time if body has just detached
self._data.last_contact_time[env_ids] = torch.where(
is_first_detached,
self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1),
self._data.last_contact_time[env_ids],
)
# -- increment time for bodies that are in contact
self._data.current_contact_time[env_ids] = torch.where(
is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0
)
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
            # create markers if necessary for the first time
if not hasattr(self, "contact_visualizer"):
self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
# set their visibility to true
self.contact_visualizer.set_visibility(True)
else:
if hasattr(self, "contact_visualizer"):
self.contact_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# safely return if view becomes invalid
# note: this invalidity happens because of isaac sim view callbacks
if self.body_physx_view is None:
return
# marker indices
# 0: contact, 1: no contact
net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1)
marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1)
# check if prim is visualized
if self.cfg.track_pose:
frame_origins: torch.Tensor = self._data.pos_w
else:
pose = self.body_physx_view.get_transforms()
frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3]
# visualize
self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1))
"""
Internal simulation callbacks.
"""
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
# call parent
super()._invalidate_initialize_callback(event)
# set all existing views to None to invalidate them
self._physics_sim_view = None
self._body_physx_view = None
self._contact_physx_view = None
| 18,975 | Python | 46.20398 | 160 | 0.628722 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_data.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# needed to import for allowing type-hinting: torch.Tensor | None
from __future__ import annotations
import torch
from dataclasses import dataclass
@dataclass
class ContactSensorData:
"""Data container for the contact reporting sensor."""
pos_w: torch.Tensor | None = None
"""Position of the sensor origin in world frame.
Shape is (N, 3), where N is the number of sensors.
Note:
If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
"""
quat_w: torch.Tensor | None = None
"""Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.
Shape is (N, 4), where N is the number of sensors.
Note:
If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
"""
net_forces_w: torch.Tensor | None = None
"""The net contact forces in world frame.
Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor.
"""
net_forces_w_history: torch.Tensor | None = None
"""The net contact forces in world frame.
Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length
and B is the number of bodies in each sensor.
In the history dimension, the first index is the most recent and the last index is the oldest.
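A minimal indexing sketch (assuming ``sensor`` is an initialized :class:`ContactSensor` instance
with a history length greater than one):
.. code-block:: python
    latest_forces = sensor.data.net_forces_w_history[:, 0]  # most recent sample, shape (N, B, 3)
    oldest_forces = sensor.data.net_forces_w_history[:, -1]  # oldest sample in the buffer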
"""
force_matrix_w: torch.Tensor | None = None
"""The contact forces filtered between the sensor bodies and filtered bodies in world frame.
Shape is (N, B, M, 3), where N is the number of sensors, B is the number of bodies in each sensor
and M is the number of filtered bodies.
Note:
If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None.
"""
last_air_time: torch.Tensor | None = None
"""Time spent (in s) in the air before the last contact.
Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
Note:
If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
"""
current_air_time: torch.Tensor | None = None
"""Time spent (in s) in the air since the last detach.
Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
Note:
If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
"""
last_contact_time: torch.Tensor | None = None
"""Time spent (in s) in contact before the last detach.
Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
Note:
If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
"""
current_contact_time: torch.Tensor | None = None
"""Time spent (in s) in contact since the last contact.
Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
Note:
If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
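A query sketch for these buffers (assuming ``sensor`` is a contact sensor with
:attr:`ContactSensorCfg.track_air_time` enabled; the variable names are illustrative):
.. code-block:: python
    in_contact = sensor.data.current_contact_time > 0.0  # (N, B) boolean contact state
    last_flight_duration = sensor.data.last_air_time  # air time of the last completed swing phase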
"""
| 3,177 | Python | 32.452631 | 102 | 0.671073 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for rigid contact sensor based on :class:`omni.isaac.core.prims.RigidContactView`."""
from .contact_sensor import ContactSensor
from .contact_sensor_cfg import ContactSensorCfg
from .contact_sensor_data import ContactSensorData
| 366 | Python | 32.363633 | 99 | 0.789617 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.markers import VisualizationMarkersCfg
from omni.isaac.orbit.markers.config import CONTACT_SENSOR_MARKER_CFG
from omni.isaac.orbit.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .contact_sensor import ContactSensor
@configclass
class ContactSensorCfg(SensorBaseCfg):
"""Configuration for the contact sensor."""
class_type: type = ContactSensor
track_pose: bool = False
"""Whether to track the pose of the sensor's origin. Defaults to False."""
track_air_time: bool = False
"""Whether to track the air/contact time of the bodies (time between contacts). Defaults to False."""
force_threshold: float = 1.0
"""The threshold on the norm of the contact force that determines whether two bodies are in collision or not.
This value is only used for tracking the mode duration (the time in contact or in air),
if :attr:`track_air_time` is True.
"""
filter_prim_paths_expr: list[str] = list()
"""The list of primitive paths (or expressions) to filter contacts with. Defaults to an empty list, in which case
no filtering is applied.
The contact sensor allows reporting contacts between the primitive specified with :attr:`prim_path` and
other primitives in the scene. For instance, in a scene containing a robot, a ground plane and an object,
you can obtain individual contact reports of the base of the robot with the ground plane and the object.
.. note::
The expression in the list can contain the environment namespace regex ``{ENV_REGEX_NS}`` which
will be replaced with the environment namespace.
Example: ``{ENV_REGEX_NS}/Object`` will be replaced with ``/World/envs/env_.*/Object``.
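Example sketch (the prim path expressions are illustrative, not part of the API):
.. code-block:: python
    sensor_cfg = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT",
        filter_prim_paths_expr=["{ENV_REGEX_NS}/Object"],
    )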
"""
visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor")
"""The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG.
.. note::
This attribute is only used when debug visualization is enabled.
"""
| 2,185 | Python | 38.745454 | 117 | 0.727689 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/rl_task_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
from .base_env_cfg import BaseEnvCfg
from .ui import RLTaskEnvWindow
@configclass
class RLTaskEnvCfg(BaseEnvCfg):
"""Configuration for a reinforcement learning environment."""
# ui settings
ui_window_class_type: type | None = RLTaskEnvWindow
# general settings
is_finite_horizon: bool = False
"""Whether the learning task is treated as a finite or infinite horizon problem for the agent.
Defaults to False, which means the task is treated as an infinite horizon problem.
This flag handles the subtleties of finite and infinite horizon tasks:
* **Finite horizon**: no penalty or bootstrapping value is required by the agent for
running out of time. However, the environment still needs to terminate the episode after the
time limit is reached.
* **Infinite horizon**: the agent needs to bootstrap the value of the state at the end of the episode.
This is done by sending a time-limit (or truncated) done signal to the agent, which triggers this
bootstrapping calculation.
If True, then the environment is treated as a finite horizon problem and no time-out (or truncated) done signal
is sent to the agent. If False, then the environment is treated as an infinite horizon problem and a time-out
(or truncated) done signal is sent to the agent.
Note:
The base :class:`RLTaskEnv` class does not use this flag directly. It is used by the environment
wrappers to determine what type of done signal to send to the corresponding learning agent.
"""
episode_length_s: float = MISSING
"""Duration of an episode (in seconds).
Based on the decimation rate and physics time step, the episode length is calculated as:
.. code-block:: python
episode_length_steps = ceil(episode_length_s / (decimation_rate * physics_time_step))
For example, if the decimation rate is 10, the physics time step is 0.01, and the episode length is 10 seconds,
then the episode length in steps is 100.
"""
# environment settings
rewards: object = MISSING
"""Reward settings.
Please refer to the :class:`omni.isaac.orbit.managers.RewardManager` class for more details.
"""
terminations: object = MISSING
"""Termination settings.
Please refer to the :class:`omni.isaac.orbit.managers.TerminationManager` class for more details.
"""
curriculum: object = MISSING
"""Curriculum settings.
Please refer to the :class:`omni.isaac.orbit.managers.CurriculumManager` class for more details.
"""
commands: object = MISSING
"""Command settings.
Please refer to the :class:`omni.isaac.orbit.managers.CommandManager` class for more details.
"""
| 2,927 | Python | 35.148148 | 115 | 0.720875 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package for environment definitions.
Environments define the interface between the agent and the simulation.
In the simplest case, the environment provides the agent with the current
observations and executes the actions provided by the agent. However, the
environment can also provide additional information such as the current
reward, done flag, and information about the current episode.
Based on these, there are two types of environments:
* :class:`BaseEnv`: The base environment which only provides the agent with the
current observations and executes the actions provided by the agent.
* :class:`RLTaskEnv`: The RL task environment which besides the functionality of
the base environment also provides additional Markov Decision Process (MDP)
related information such as the current reward, done flag, and information.
"""
from . import mdp, ui
from .base_env import BaseEnv, VecEnvObs
from .base_env_cfg import BaseEnvCfg, ViewerCfg
from .rl_task_env import RLTaskEnv, VecEnvStepReturn
from .rl_task_env_cfg import RLTaskEnvCfg
| 1,177 | Python | 39.620688 | 80 | 0.796941 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Base configuration of the environment.
This module defines the general configuration of the environment. It includes parameters for
configuring the environment instances, viewer settings, and simulation parameters.
"""
from dataclasses import MISSING
from typing import Literal
import omni.isaac.orbit.envs.mdp as mdp
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sim import SimulationCfg
from omni.isaac.orbit.utils import configclass
from .ui import BaseEnvWindow
@configclass
class ViewerCfg:
"""Configuration of the scene viewport camera."""
eye: tuple[float, float, float] = (7.5, 7.5, 7.5)
"""Initial camera position (in m). Default is (7.5, 7.5, 7.5)."""
lookat: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Initial camera target position (in m). Default is (0.0, 0.0, 0.0)."""
cam_prim_path: str = "/OmniverseKit_Persp"
"""The camera prim path to record images from. Default is "/OmniverseKit_Persp",
which is the default camera in the viewport.
"""
resolution: tuple[int, int] = (1280, 720)
"""The resolution (width, height) of the camera specified using :attr:`cam_prim_path`.
Default is (1280, 720).
"""
origin_type: Literal["world", "env", "asset_root"] = "world"
"""The frame in which the camera position (eye) and target (lookat) are defined in. Default is "world".
Available options are:
* ``"world"``: The origin of the world.
* ``"env"``: The origin of the environment defined by :attr:`env_index`.
* ``"asset_root"``: The center of the asset defined by :attr:`asset_name` in environment :attr:`env_index`.
"""
env_index: int = 0
"""The environment index for frame origin. Default is 0.
This quantity is only effective if :attr:`origin` is set to "env" or "asset_root".
"""
asset_name: str | None = None
"""The asset name in the interactive scene for the frame origin. Default is None.
This quantity is only effective if :attr:`origin` is set to "asset_root".
"""
@configclass
class DefaultEventManagerCfg:
"""Configuration of the default event manager.
This manager is used to reset the scene to a default state. The default state is specified
by the scene configuration.
"""
reset_scene_to_default = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
@configclass
class BaseEnvCfg:
"""Base configuration of the environment."""
# simulation settings
viewer: ViewerCfg = ViewerCfg()
"""Viewer configuration. Default is ViewerCfg()."""
sim: SimulationCfg = SimulationCfg()
"""Physics simulation configuration. Default is SimulationCfg()."""
# ui settings
ui_window_class_type: type | None = BaseEnvWindow
"""The class type of the UI window. Default is None.
If None, then no UI window is created.
Note:
If you want to make your own UI window, you can create a class that inherits from
from :class:`omni.isaac.orbit.envs.ui.base_env_window.BaseEnvWindow`. Then, you can set
this attribute to your class type.
"""
# general settings
decimation: int = MISSING
"""Number of control action updates @ sim dt per policy dt.
For instance, if the simulation dt is 0.01s and the policy dt is 0.1s, then the decimation is 10.
This means that the control action is updated every 10 simulation steps.
"""
# environment settings
scene: InteractiveSceneCfg = MISSING
"""Scene settings.
Please refer to the :class:`omni.isaac.orbit.scene.InteractiveSceneCfg` class for more details.
"""
observations: object = MISSING
"""Observation space settings.
Please refer to the :class:`omni.isaac.orbit.managers.ObservationManager` class for more details.
"""
actions: object = MISSING
"""Action space settings.
Please refer to the :class:`omni.isaac.orbit.managers.ActionManager` class for more details.
"""
events: object = DefaultEventManagerCfg()
"""Event settings. Defaults to the basic configuration that resets the scene to its default state.
Please refer to the :class:`omni.isaac.orbit.managers.EventManager` class for more details.
"""
randomization: object | None = None
"""Randomization settings. Default is None.
.. deprecated:: 0.3.0
This attribute is deprecated and will be removed in v0.4.0. Please use the :attr:`events`
attribute to configure the randomization settings.
"""
| 4,682 | Python | 32.212766 | 111 | 0.693293 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/rl_task_env.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# needed to import for allowing type-hinting: np.ndarray | None
from __future__ import annotations
import gymnasium as gym
import math
import numpy as np
import torch
from collections.abc import Sequence
from typing import Any, ClassVar
from omni.isaac.version import get_version
from omni.isaac.orbit.managers import CommandManager, CurriculumManager, RewardManager, TerminationManager
from .base_env import BaseEnv, VecEnvObs
from .rl_task_env_cfg import RLTaskEnvCfg
VecEnvStepReturn = tuple[VecEnvObs, torch.Tensor, torch.Tensor, torch.Tensor, dict]
"""The environment signals processed at the end of each step.
The tuple contains batched information for each sub-environment. The information is stored in the following order:
1. **Observations**: The observations from the environment.
2. **Rewards**: The rewards from the environment.
3. **Terminated Dones**: Whether the environment reached a terminal state, such as task success or the robot falling.
4. **Timeout Dones**: Whether the environment reached a timeout state, such as end of max episode length.
5. **Extras**: A dictionary containing additional information from the environment.
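A typical unpacking sketch:
.. code-block:: python
    obs, reward, terminated, truncated, extras = env.step(action)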
"""
class RLTaskEnv(BaseEnv, gym.Env):
"""The superclass for reinforcement learning-based environments.
This class inherits from :class:`BaseEnv` and implements the core functionality for
reinforcement learning-based environments. It is designed to be used with any RL
library. The class is designed to be used with vectorized environments, i.e., the
environment is expected to be run in parallel with multiple sub-environments. The
number of sub-environments is specified using the ``num_envs``.
Each observation from the environment is a batch of observations for each sub-environment.
The method :meth:`step` is also expected to receive a batch of actions
for each sub-environment.
While the environment itself is implemented as a vectorized environment, we do not
inherit from :class:`gym.vector.VectorEnv`. This is mainly because the class adds
various methods (for wait and asynchronous updates) which are not required.
Additionally, each RL library typically has its own definition for a vectorized
environment. Thus, to reduce complexity, we directly use the :class:`gym.Env` over
here and leave it up to library-defined wrappers to take care of wrapping this
environment for their agents.
Note:
For vectorized environments, it is recommended to **only** call the :meth:`reset`
method once before the first call to :meth:`step`, i.e. after the environment is created.
After that, the :meth:`step` function handles the reset of terminated sub-environments.
This is because the simulator does not support resetting individual sub-environments
in a vectorized environment.
"""
is_vector_env: ClassVar[bool] = True
"""Whether the environment is a vectorized environment."""
metadata: ClassVar[dict[str, Any]] = {
"render_modes": [None, "human", "rgb_array"],
"isaac_sim_version": get_version(),
}
"""Metadata for the environment."""
cfg: RLTaskEnvCfg
"""Configuration for the environment."""
def __init__(self, cfg: RLTaskEnvCfg, render_mode: str | None = None, **kwargs):
"""Initialize the environment.
Args:
cfg: The configuration for the environment.
render_mode: The render mode for the environment. Defaults to None, which
is similar to ``"human"``.
"""
# initialize the base class to setup the scene.
super().__init__(cfg=cfg)
# store the render mode
self.render_mode = render_mode
# initialize data and constants
# -- counter for curriculum
self.common_step_counter = 0
# -- init buffers
self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# setup the action and observation spaces for Gym
self._configure_gym_env_spaces()
# perform events at the start of the simulation
if "startup" in self.event_manager.available_modes:
self.event_manager.apply(mode="startup")
# print the environment information
print("[INFO]: Completed setting up the environment...")
"""
Properties.
"""
@property
def max_episode_length_s(self) -> float:
"""Maximum episode length in seconds."""
return self.cfg.episode_length_s
@property
def max_episode_length(self) -> int:
"""Maximum episode length in environment steps."""
return math.ceil(self.max_episode_length_s / self.step_dt)
"""
Operations - Setup.
"""
def load_managers(self):
# note: this order is important since observation manager needs to know the command and action managers
# and the reward manager needs to know the termination manager
# -- command manager
self.command_manager: CommandManager = CommandManager(self.cfg.commands, self)
print("[INFO] Command Manager: ", self.command_manager)
# call the parent class to load the managers for observations and actions.
super().load_managers()
# prepare the managers
# -- termination manager
self.termination_manager = TerminationManager(self.cfg.terminations, self)
print("[INFO] Termination Manager: ", self.termination_manager)
# -- reward manager
self.reward_manager = RewardManager(self.cfg.rewards, self)
print("[INFO] Reward Manager: ", self.reward_manager)
# -- curriculum manager
self.curriculum_manager = CurriculumManager(self.cfg.curriculum, self)
print("[INFO] Curriculum Manager: ", self.curriculum_manager)
"""
Operations - MDP
"""
def step(self, action: torch.Tensor) -> VecEnvStepReturn:
"""Execute one time-step of the environment's dynamics and reset terminated environments.
Unlike the :meth:`BaseEnv.step` method, this function performs the following operations:
1. Process the actions.
2. Perform physics stepping.
3. Perform rendering if gui is enabled.
4. Update the environment counters and compute the rewards and terminations.
5. Reset the environments that terminated.
6. Compute the observations.
7. Return the observations, rewards, resets and extras.
Args:
action: The actions to apply on the environment. Shape is (num_envs, action_dim).
Returns:
A tuple containing the observations, rewards, resets (terminated and truncated) and extras.
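A minimal interaction sketch (the zero actions are purely illustrative):
.. code-block:: python
    obs, extras = env.reset()
    action_dim = sum(env.action_manager.action_term_dim)
    for _ in range(100):
        action = torch.zeros(env.num_envs, action_dim, device=env.device)
        obs, reward, terminated, truncated, extras = env.step(action)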
"""
# process actions
self.action_manager.process_action(action)
# perform physics stepping
for _ in range(self.cfg.decimation):
# set actions into buffers
self.action_manager.apply_action()
# set actions into simulator
self.scene.write_data_to_sim()
# simulate
self.sim.step(render=False)
# update buffers at sim dt
self.scene.update(dt=self.physics_dt)
# perform rendering if gui is enabled
if self.sim.has_gui() or self.sim.has_rtx_sensors():
self.sim.render()
# post-step:
# -- update env counters (used for curriculum generation)
self.episode_length_buf += 1 # step in current episode (per env)
self.common_step_counter += 1 # total step (common for all envs)
# -- check terminations
self.reset_buf = self.termination_manager.compute()
self.reset_terminated = self.termination_manager.terminated
self.reset_time_outs = self.termination_manager.time_outs
# -- reward computation
self.reward_buf = self.reward_manager.compute(dt=self.step_dt)
# -- reset envs that terminated/timed-out and log the episode information
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self._reset_idx(reset_env_ids)
# -- update command
self.command_manager.compute(dt=self.step_dt)
# -- step interval events
if "interval" in self.event_manager.available_modes:
self.event_manager.apply(mode="interval", dt=self.step_dt)
# -- compute observations
# note: done after reset to get the correct observations for reset envs
self.obs_buf = self.observation_manager.compute()
# return observations, rewards, resets and extras
return self.obs_buf, self.reward_buf, self.reset_terminated, self.reset_time_outs, self.extras
def render(self, recompute: bool = False) -> np.ndarray | None:
"""Run rendering without stepping through the physics.
By convention, if mode is:
- **human**: Render to the current display and return nothing. Usually for human consumption.
- **rgb_array**: Return a numpy.ndarray with shape (x, y, 3), representing RGB values for an
x-by-y pixel image, suitable for turning into a video.
Args:
recompute: Whether to force a render even if the simulator has already rendered the scene.
Defaults to False.
Returns:
The rendered image as a numpy array if mode is "rgb_array". Otherwise, returns None.
Raises:
RuntimeError: If mode is set to "rgb_array" and simulation render mode does not support it.
In this case, the simulation render mode must be set to ``RenderMode.PARTIAL_RENDERING``
or ``RenderMode.FULL_RENDERING``.
NotImplementedError: If an unsupported rendering mode is specified.
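A usage sketch for offscreen frame capture (assuming the simulation render mode supports it):
.. code-block:: python
    env = RLTaskEnv(cfg, render_mode="rgb_array")
    env.reset()
    frame = env.render()  # numpy array of shape (height, width, 3)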
"""
# run a rendering step of the simulator
# if we have rtx sensors, we do not need to render again since rendering already happens as part of the sensor update
if not self.sim.has_rtx_sensors() and not recompute:
self.sim.render()
# decide the rendering mode
if self.render_mode == "human" or self.render_mode is None:
return None
elif self.render_mode == "rgb_array":
# check if any rendering could have happened at all
if self.sim.render_mode.value < self.sim.RenderMode.PARTIAL_RENDERING.value:
raise RuntimeError(
f"Cannot render '{self.render_mode}' when the simulation render mode is"
f" '{self.sim.render_mode.name}'. Please set the simulation render mode to:"
f"'{self.sim.RenderMode.PARTIAL_RENDERING.name}' or '{self.sim.RenderMode.FULL_RENDERING.name}'."
)
# create the annotator if it does not exist
if not hasattr(self, "_rgb_annotator"):
import omni.replicator.core as rep
# create render product
self._render_product = rep.create.render_product(
self.cfg.viewer.cam_prim_path, self.cfg.viewer.resolution
)
# create rgb annotator -- used to read data from the render product
self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu")
self._rgb_annotator.attach([self._render_product])
# obtain the rgb data
rgb_data = self._rgb_annotator.get_data()
# convert to numpy array
rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
# return the rgb data
# note: initially the renderer is warming up and returns empty data
if rgb_data.size == 0:
return np.zeros((self.cfg.viewer.resolution[1], self.cfg.viewer.resolution[0], 3), dtype=np.uint8)
else:
return rgb_data[:, :, :3]
else:
raise NotImplementedError(
f"Render mode '{self.render_mode}' is not supported. Please use: {self.metadata['render_modes']}."
)
def close(self):
if not self._is_closed:
# destructor is order-sensitive
del self.command_manager
del self.reward_manager
del self.termination_manager
del self.curriculum_manager
# call the parent class to close the environment
super().close()
"""
Helper functions.
"""
def _configure_gym_env_spaces(self):
"""Configure the action and observation spaces for the Gym environment."""
# observation space (unbounded since we don't impose any limits)
self.single_observation_space = gym.spaces.Dict()
for group_name, group_term_names in self.observation_manager.active_terms.items():
# extract quantities about the group
has_concatenated_obs = self.observation_manager.group_obs_concatenate[group_name]
group_dim = self.observation_manager.group_obs_dim[group_name]
group_term_dim = self.observation_manager.group_obs_term_dim[group_name]
# check if group is concatenated or not
# if not concatenated, then we need to add each term separately as a dictionary
if has_concatenated_obs:
self.single_observation_space[group_name] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=group_dim)
else:
self.single_observation_space[group_name] = gym.spaces.Dict({
term_name: gym.spaces.Box(low=-np.inf, high=np.inf, shape=term_dim)
for term_name, term_dim in zip(group_term_names, group_term_dim)
})
# action space (unbounded since we don't impose any limits)
action_dim = sum(self.action_manager.action_term_dim)
self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(action_dim,))
# batch the spaces for vectorized environments
self.observation_space = gym.vector.utils.batch_space(self.single_observation_space, self.num_envs)
self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs)
def _reset_idx(self, env_ids: Sequence[int]):
"""Reset environments based on specified indices.
Args:
env_ids: List of environment ids which must be reset
"""
# update the curriculum for environments that need a reset
self.curriculum_manager.compute(env_ids=env_ids)
# reset the internal buffers of the scene elements
self.scene.reset(env_ids)
# apply events such as randomizations for environments that need a reset
if "reset" in self.event_manager.available_modes:
self.event_manager.apply(env_ids=env_ids, mode="reset")
# iterate over all managers and reset them
# this returns a dictionary of information which is stored in the extras
# note: This is order-sensitive! Certain things need to be reset before others.
self.extras["log"] = dict()
# -- observation manager
info = self.observation_manager.reset(env_ids)
self.extras["log"].update(info)
# -- action manager
info = self.action_manager.reset(env_ids)
self.extras["log"].update(info)
# -- rewards manager
info = self.reward_manager.reset(env_ids)
self.extras["log"].update(info)
# -- curriculum manager
info = self.curriculum_manager.reset(env_ids)
self.extras["log"].update(info)
# -- command manager
info = self.command_manager.reset(env_ids)
self.extras["log"].update(info)
# -- event manager
info = self.event_manager.reset(env_ids)
self.extras["log"].update(info)
# -- termination manager
info = self.termination_manager.reset(env_ids)
self.extras["log"].update(info)
# reset the episode length buffer
self.episode_length_buf[env_ids] = 0
| 16,041 | Python | 44.573864 | 117 | 0.649585 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import builtins
import torch
import warnings
from collections.abc import Sequence
from typing import Any, Dict
import carb
import omni.isaac.core.utils.torch as torch_utils
from omni.isaac.orbit.managers import ActionManager, EventManager, ObservationManager
from omni.isaac.orbit.scene import InteractiveScene
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils.timer import Timer
from .base_env_cfg import BaseEnvCfg
from .ui import ViewportCameraController
VecEnvObs = Dict[str, torch.Tensor | Dict[str, torch.Tensor]]
"""Observation returned by the environment.
The observations are stored in a dictionary. The keys are the group to which the observations belong.
This is useful for various setups such as reinforcement learning with asymmetric actor-critic or
multi-agent learning. For non-learning paradigms, this may include observations for different components
of a system.
Within each group, the observations can be stored either as a dictionary with keys as the names of each
observation term in the group, or a single tensor obtained from concatenating all the observation terms.
For example, for asymmetric actor-critic, the observation for the actor and the critic can be accessed
using the keys ``"policy"`` and ``"critic"`` respectively.
Note:
By default, most learning frameworks deal with default and privileged observations in different ways.
This handling must be taken care of by the wrapper around the :class:`RLTaskEnv` instance.
For included frameworks (RSL-RL, RL-Games, skrl), the observations must have the key "policy". In case,
the key "critic" is also present, then the critic observations are taken from the "critic" group.
Otherwise, they are the same as the "policy" group.
"""
class BaseEnv:
"""The base environment encapsulates the simulation scene and the environment managers.
While a simulation scene or world comprises different components such as the robots, objects,
and sensors (cameras, lidars, etc.), the environment is a higher level abstraction
that provides an interface for interacting with the simulation. The environment is comprised of
the following components:
* **Scene**: The scene manager that creates and manages the virtual world in which the robot operates.
This includes defining the robot, static and dynamic objects, sensors, etc.
* **Observation Manager**: The observation manager that generates observations from the current simulation
state and the data gathered from the sensors. These observations may include privileged information
that is not available to the robot in the real world. Additionally, user-defined terms can be added
to process the observations and generate custom observations. For example, using a network to embed
high-dimensional observations into a lower-dimensional space.
* **Action Manager**: The action manager that processes the raw actions sent to the environment and
converts them to low-level commands that are sent to the simulation. It can be configured to accept
raw actions at different levels of abstraction. For example, in case of a robotic arm, the raw actions
can be joint torques, joint positions, or end-effector poses. Similarly for a mobile base, it can be
the joint torques, or the desired velocity of the floating base.
* **Event Manager**: The event manager orchestrates operations triggered based on simulation events.
This includes resetting the scene to a default state, applying random pushes to the robot at different intervals
of time, or randomizing properties such as mass and friction coefficients. This is useful for training
and evaluating the robot in a variety of scenarios.
The environment provides a unified interface for interacting with the simulation. However, it does not
include task-specific quantities such as the reward function, or the termination conditions. These
quantities are often specific to defining Markov Decision Processes (MDPs) while the base environment
is agnostic to the MDP definition.
The environment steps forward in time at a fixed time-step. The physics simulation is decimated at a
lower time-step. This is to ensure that the simulation is stable. These two time-steps can be configured
independently using the :attr:`BaseEnvCfg.decimation` (number of simulation steps per environment step)
and the :attr:`BaseEnvCfg.sim.dt` (physics time-step) parameters. Based on these parameters, the
environment time-step is computed as the product of the two. The two time-steps can be obtained by
querying the :attr:`physics_dt` and the :attr:`step_dt` properties respectively.
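For example, a sketch of the relation between the two time-steps (the values are illustrative):
.. code-block:: python
    # with cfg.sim.dt = 0.005 and cfg.decimation = 4:
    env.physics_dt  # 0.005 s -> physics runs at 200 Hz
    env.step_dt  # 0.005 * 4 = 0.02 s -> the environment steps at 50 Hz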
"""
def __init__(self, cfg: BaseEnvCfg):
"""Initialize the environment.
Args:
cfg: The configuration object for the environment.
Raises:
RuntimeError: If a simulation context already exists. The environment must always create one
since it configures the simulation context and controls the simulation.
"""
# store inputs to class
self.cfg = cfg
# initialize internal variables
self._is_closed = False
# create a simulation context to control the simulator
if SimulationContext.instance() is None:
# the type-annotation is required to avoid a type-checking error
# since it gets confused with Isaac Sim's SimulationContext class
self.sim: SimulationContext = SimulationContext(self.cfg.sim)
else:
raise RuntimeError("Simulation context already exists. Cannot create a new one.")
# print useful information
print("[INFO]: Base environment:")
print(f"\tEnvironment device : {self.device}")
print(f"\tPhysics step-size : {self.physics_dt}")
print(f"\tRendering step-size : {self.physics_dt * self.cfg.sim.substeps}")
print(f"\tEnvironment step-size : {self.step_dt}")
print(f"\tPhysics GPU pipeline : {self.cfg.sim.use_gpu_pipeline}")
print(f"\tPhysics GPU simulation: {self.cfg.sim.physx.use_gpu}")
# generate scene
with Timer("[INFO]: Time taken for scene creation"):
self.scene = InteractiveScene(self.cfg.scene)
print("[INFO]: Scene manager: ", self.scene)
# set up camera viewport controller
# viewport is not available in other rendering modes so the function will throw a warning
# FIXME: This needs to be fixed in the future when we unify the UI functionalities even for
# non-rendering modes.
if self.sim.render_mode >= self.sim.RenderMode.PARTIAL_RENDERING:
self.viewport_camera_controller = ViewportCameraController(self, self.cfg.viewer)
else:
self.viewport_camera_controller = None
# play the simulator to activate physics handles
# note: this activates the physics simulation view that exposes TensorAPIs
# note: when started in extension mode, first call sim.reset_async() and then initialize the managers
if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False:
print("[INFO]: Starting the simulation. This may take a few seconds. Please wait...")
with Timer("[INFO]: Time taken for simulation start"):
self.sim.reset()
# add timeline event to load managers
self.load_managers()
# extend UI elements
# we need to do this here after all the managers are initialized
# this is because they dictate the sensors and commands right now
if self.sim.has_gui() and self.cfg.ui_window_class_type is not None:
self._window = self.cfg.ui_window_class_type(self, window_name="Orbit")
else:
# if no window, then we don't need to store the window
self._window = None
# allocate dictionary to store metrics
self.extras = {}
def __del__(self):
"""Cleanup for the environment."""
self.close()
"""
Properties.
"""
@property
def num_envs(self) -> int:
"""The number of instances of the environment that are running."""
return self.scene.num_envs
@property
def physics_dt(self) -> float:
"""The physics time-step (in s).
This is the lowest time-decimation at which the simulation is happening.
"""
return self.cfg.sim.dt
@property
def step_dt(self) -> float:
"""The environment stepping time-step (in s).
This is the time-step at which the environment steps forward.
"""
return self.cfg.sim.dt * self.cfg.decimation
@property
def device(self):
"""The device on which the environment is running."""
return self.sim.device
"""
Operations - Setup.
"""
def load_managers(self):
"""Load the managers for the environment.
This function is responsible for creating the various managers (action, observation,
events, etc.) for the environment. Since the managers require access to physics handles,
they can only be created after the simulator is reset (i.e. played for the first time).
.. note::
In case of standalone application (when running simulator from Python), the function is called
automatically when the class is initialized.
However, in case of extension mode, the user must call this function manually after the simulator
is reset. This is because the simulator is only reset when the user calls
:meth:`SimulationContext.reset_async` and it isn't possible to call async functions in the constructor.
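A sketch of the extension-mode flow (inside an async context):
.. code-block:: python
    await env.sim.reset_async()
    env.load_managers()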
"""
# check the configs
if self.cfg.randomization is not None:
msg = (
"The 'randomization' attribute is deprecated and will be removed in a future release. "
"Please use the 'events' attribute to configure the randomization settings."
)
warnings.warn(msg, category=DeprecationWarning)
carb.log_warn(msg)
# set the randomization as events (for backward compatibility)
self.cfg.events = self.cfg.randomization
# prepare the managers
# -- action manager
self.action_manager = ActionManager(self.cfg.actions, self)
print("[INFO] Action Manager: ", self.action_manager)
# -- observation manager
self.observation_manager = ObservationManager(self.cfg.observations, self)
print("[INFO] Observation Manager:", self.observation_manager)
# -- event manager
self.event_manager = EventManager(self.cfg.events, self)
print("[INFO] Event Manager: ", self.event_manager)
"""
Operations - MDP.
"""
def reset(self, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[VecEnvObs, dict]:
"""Resets all the environments and returns observations.
Args:
seed: The seed to use for randomization. Defaults to None, in which case the seed is not set.
options: Additional information to specify how the environment is reset. Defaults to None.
Note:
This argument is used for compatibility with Gymnasium environment definition.
Returns:
A tuple containing the observations and extras.
"""
# set the seed
if seed is not None:
self.seed(seed)
# reset state of scene
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
self._reset_idx(indices)
# return observations
return self.observation_manager.compute(), self.extras
def step(self, action: torch.Tensor) -> tuple[VecEnvObs, dict]:
"""Execute one time-step of the environment's dynamics.
The environment steps forward at a fixed time-step, while the physics simulation is
decimated at a lower time-step. This is to ensure that the simulation is stable. These two
time-steps can be configured independently using the :attr:`BaseEnvCfg.decimation` (number of
simulation steps per environment step) and the :attr:`BaseEnvCfg.sim.dt` (physics time-step).
Based on these parameters, the environment time-step is computed as the product of the two.
Args:
action: The actions to apply on the environment. Shape is (num_envs, action_dim).
Returns:
A tuple containing the observations and extras.
"""
# process actions
self.action_manager.process_action(action)
# perform physics stepping
for _ in range(self.cfg.decimation):
# set actions into buffers
self.action_manager.apply_action()
# set actions into simulator
self.scene.write_data_to_sim()
# simulate
self.sim.step(render=False)
# update buffers at sim dt
self.scene.update(dt=self.physics_dt)
# perform rendering if gui is enabled
if self.sim.has_gui() or self.sim.has_rtx_sensors():
self.sim.render()
# post-step: step interval event
if "interval" in self.event_manager.available_modes:
self.event_manager.apply(mode="interval", dt=self.step_dt)
# return observations and extras
return self.observation_manager.compute(), self.extras
@staticmethod
def seed(seed: int = -1) -> int:
"""Set the seed for the environment.
Args:
seed: The seed for random generator. Defaults to -1.
Returns:
The seed used for random generator.
"""
# set seed for replicator
try:
import omni.replicator.core as rep
rep.set_global_seed(seed)
except ModuleNotFoundError:
pass
# set seed for torch and other libraries
return torch_utils.set_seed(seed)
def close(self):
"""Cleanup for the environment."""
if not self._is_closed:
# destructor is order-sensitive
del self.action_manager
del self.observation_manager
del self.event_manager
del self.scene
del self.viewport_camera_controller
# clear callbacks and instance
self.sim.clear_all_callbacks()
self.sim.clear_instance()
# destroy the window
if self._window is not None:
self._window = None
# update closing status
self._is_closed = True
"""
Helper functions.
"""
def _reset_idx(self, env_ids: Sequence[int]):
"""Reset environments based on specified indices.
Args:
env_ids: List of environment ids which must be reset
"""
# reset the internal buffers of the scene elements
self.scene.reset(env_ids)
# apply events such as randomizations for environments that need a reset
if "reset" in self.event_manager.available_modes:
self.event_manager.apply(env_ids=env_ids, mode="reset")
# iterate over all managers and reset them
# this returns a dictionary of information which is stored in the extras
# note: This is order-sensitive! Certain things need to be reset before others.
self.extras["log"] = dict()
# -- observation manager
info = self.observation_manager.reset(env_ids)
self.extras["log"].update(info)
# -- action manager
info = self.action_manager.reset(env_ids)
self.extras["log"].update(info)
# -- event manager
info = self.event_manager.reset(env_ids)
self.extras["log"].update(info)
| 15,997 | Python | 43.438889 | 118 | 0.669876 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/rl_task_env_window.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from typing import TYPE_CHECKING
from .base_env_window import BaseEnvWindow
if TYPE_CHECKING:
from ..rl_task_env import RLTaskEnv
class RLTaskEnvWindow(BaseEnvWindow):
"""Window manager for the RL environment.
On top of the basic environment window, this class adds controls for the RL environment.
This includes visualization of the command manager.
"""
def __init__(self, env: RLTaskEnv, window_name: str = "Orbit"):
"""Initialize the window.
Args:
env: The environment object.
window_name: The name of the window. Defaults to "Orbit".
"""
# initialize base window
super().__init__(env, window_name)
# add custom UI elements
with self.ui_window_elements["main_vstack"]:
with self.ui_window_elements["debug_frame"]:
with self.ui_window_elements["debug_vstack"]:
# add command manager visualization
self._create_debug_vis_ui_element("commands", self.env.command_manager)
| 1,210 | Python | 30.051281 | 92 | 0.64876 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/viewport_camera_controller.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import copy
import numpy as np
import torch
import weakref
from collections.abc import Sequence
from typing import TYPE_CHECKING
import omni.kit.app
import omni.timeline
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv, ViewerCfg
class ViewportCameraController:
"""This class handles controlling the camera associated with a viewport in the simulator.
It can be used to set the viewpoint camera to track different origin types:
- **world**: the center of the world (static)
- **env**: the center of an environment (static)
- **asset_root**: the root of an asset in the scene (e.g. tracking a robot moving in the scene)
On creation, the camera is set to track the origin type specified in the configuration.
For the :attr:`asset_root` origin type, the camera is updated at each rendering step to track the asset's
root position. For this, it registers a callback to the post update event stream from the simulation app.
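A usage sketch (the asset name "robot" is illustrative; the controller is available via
:attr:`BaseEnv.viewport_camera_controller` when rendering is enabled):
.. code-block:: python
    env.viewport_camera_controller.update_view_to_asset_root("robot")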
"""
def __init__(self, env: BaseEnv, cfg: ViewerCfg):
"""Initialize the ViewportCameraController.
Args:
env: The environment.
cfg: The configuration for the viewport camera controller.
Raises:
ValueError: If origin type is configured to be "env" but :attr:`cfg.env_index` is out of bounds.
ValueError: If origin type is configured to be "asset_root" but :attr:`cfg.asset_name` is unset.
"""
# store inputs
self._env = env
self._cfg = copy.deepcopy(cfg)
# cast viewer eye and look-at to numpy arrays
self.default_cam_eye = np.array(self._cfg.eye)
self.default_cam_lookat = np.array(self._cfg.lookat)
# set the camera origins
if self.cfg.origin_type == "env":
# check that the env_index is within bounds
self.set_view_env_index(self.cfg.env_index)
# set the camera origin to the center of the environment
self.update_view_to_env()
elif self.cfg.origin_type == "asset_root":
# note: we do not yet update camera for tracking an asset origin, as the asset may not yet be
# in the scene when this is called. Instead, we subscribe to the post update event to update the camera
# at each rendering step.
if self.cfg.asset_name is None:
raise ValueError(f"No asset name provided for viewer with origin type: '{self.cfg.origin_type}'.")
else:
# set the camera origin to the center of the world
self.update_view_to_world()
# subscribe to post update event so that camera view can be updated at each rendering step
app_interface = omni.kit.app.get_app_interface()
app_event_stream = app_interface.get_post_update_event_stream()
self._viewport_camera_update_handle = app_event_stream.create_subscription_to_pop(
lambda event, obj=weakref.proxy(self): obj._update_tracking_callback(event)
)
def __del__(self):
"""Unsubscribe from the callback."""
# use hasattr to handle case where __init__ has not completed before __del__ is called
if hasattr(self, "_viewport_camera_update_handle") and self._viewport_camera_update_handle is not None:
self._viewport_camera_update_handle.unsubscribe()
self._viewport_camera_update_handle = None
"""
Properties
"""
@property
def cfg(self) -> ViewerCfg:
"""The configuration for the viewer."""
return self._cfg
"""
Public Functions
"""
def set_view_env_index(self, env_index: int):
"""Sets the environment index for the camera view.
Args:
env_index: The index of the environment to set the camera view to.
Raises:
ValueError: If the environment index is out of bounds. It should be between 0 and num_envs - 1.
"""
# check that the env_index is within bounds
if env_index < 0 or env_index >= self._env.num_envs:
raise ValueError(
f"Out of range value for attribute 'env_index': {env_index}."
f" Expected a value between 0 and {self._env.num_envs - 1} for the current environment."
)
# update the environment index
self.cfg.env_index = env_index
# update the camera view if the origin is set to env type (since, the camera view is static)
# note: for assets, the camera view is updated at each rendering step
if self.cfg.origin_type == "env":
self.update_view_to_env()
def update_view_to_world(self):
"""Updates the viewer's origin to the origin of the world which is (0, 0, 0)."""
# set origin type to world
self.cfg.origin_type = "world"
# update the camera origins
self.viewer_origin = torch.zeros(3)
# update the camera view
self.update_view_location()
def update_view_to_env(self):
"""Updates the viewer's origin to the origin of the selected environment."""
# set origin type to env
self.cfg.origin_type = "env"
# update the camera origins
self.viewer_origin = self._env.scene.env_origins[self.cfg.env_index]
# update the camera view
self.update_view_location()
def update_view_to_asset_root(self, asset_name: str):
"""Updates the viewer's origin based upon the root of an asset in the scene.
Args:
asset_name: The name of the asset in the scene. The name should match the name of the
asset in the scene.
Raises:
ValueError: If the asset is not in the scene.
"""
# check if the asset is in the scene
if self.cfg.asset_name != asset_name:
asset_entities = [*self._env.scene.rigid_objects.keys(), *self._env.scene.articulations.keys()]
if asset_name not in asset_entities:
raise ValueError(f"Asset '{asset_name}' is not in the scene. Available entities: {asset_entities}.")
# update the asset name
self.cfg.asset_name = asset_name
# set origin type to asset_root
self.cfg.origin_type = "asset_root"
# update the camera origins
self.viewer_origin = self._env.scene[self.cfg.asset_name].data.root_pos_w[self.cfg.env_index]
# update the camera view
self.update_view_location()
def update_view_location(self, eye: Sequence[float] | None = None, lookat: Sequence[float] | None = None):
"""Updates the camera view pose based on the current viewer origin and the eye and lookat positions.
Args:
eye: The eye position of the camera. If None, the current eye position is used.
lookat: The lookat position of the camera. If None, the current lookat position is used.
"""
# store the camera view pose for later use
if eye is not None:
self.default_cam_eye = np.asarray(eye)
if lookat is not None:
self.default_cam_lookat = np.asarray(lookat)
# set the camera locations
viewer_origin = self.viewer_origin.detach().cpu().numpy()
cam_eye = viewer_origin + self.default_cam_eye
cam_target = viewer_origin + self.default_cam_lookat
# set the camera view
self._env.sim.set_camera_view(eye=cam_eye, target=cam_target)
"""
Private Functions
"""
def _update_tracking_callback(self, event):
"""Updates the camera view at each rendering step."""
# update the camera view if the origin is set to asset_root
# in other cases, the camera view is static and does not need to be updated continuously
if self.cfg.origin_type == "asset_root" and self.cfg.asset_name is not None:
self.update_view_to_asset_root(self.cfg.asset_name)
| 8,046 | Python | 40.6943 | 116 | 0.637087 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module providing UI window implementation for environments.
The UI elements are used to control the environment and visualize the state of the environment.
This includes functionalities such as tracking a robot in the simulation,
toggling different debug visualization tools, and other user-defined functionalities.
"""
from .base_env_window import BaseEnvWindow
from .rl_task_env_window import RLTaskEnvWindow
from .viewport_camera_controller import ViewportCameraController
| 608 | Python | 37.062498 | 95 | 0.814145 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/base_env_window.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import asyncio
import os
import weakref
from datetime import datetime
from typing import TYPE_CHECKING
import omni.isaac.ui.ui_utils as ui_utils
import omni.kit.app
import omni.kit.commands
import omni.ui
import omni.usd
from omni.kit.window.extensions import SimpleCheckBox
from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics
if TYPE_CHECKING:
from ..base_env import BaseEnv
class BaseEnvWindow:
"""Window manager for the basic environment.
This class creates a window that is used to control the environment. The window
contains controls for rendering, debug visualization, and other environment-specific
UI elements.
Users can add their own UI elements to the window by using the `with` context manager.
This can be done either be inheriting the class or by using the `env.window` object
directly from the standalone execution script.
Example for adding a UI element from the standalone execution script:
>>> with env.window.ui_window_elements["main_vstack"]:
>>> ui.Label("My UI element")
"""
def __init__(self, env: BaseEnv, window_name: str = "Orbit"):
"""Initialize the window.
Args:
env: The environment object.
window_name: The name of the window. Defaults to "Orbit".
"""
# store inputs
self.env = env
# prepare the list of assets that can be followed by the viewport camera
# note that the first two options are "World" and "Env" which are special cases
self._viewer_assets_options = [
"World",
"Env",
*self.env.scene.rigid_objects.keys(),
*self.env.scene.articulations.keys(),
]
print("Creating window for environment.")
# create window for UI
self.ui_window = omni.ui.Window(
window_name, width=400, height=500, visible=True, dock_preference=omni.ui.DockPreference.RIGHT_TOP
)
# dock next to properties window
asyncio.ensure_future(self._dock_window(window_title=self.ui_window.title))
# keep a dictionary of stacks so that child environments can add their own UI elements
# this can be done by using the `with` context manager
self.ui_window_elements = dict()
# create main frame
self.ui_window_elements["main_frame"] = self.ui_window.frame
with self.ui_window_elements["main_frame"]:
# create main stack
self.ui_window_elements["main_vstack"] = omni.ui.VStack(spacing=5, height=0)
with self.ui_window_elements["main_vstack"]:
# create collapsable frame for simulation
self._build_sim_frame()
# create collapsable frame for viewer
self._build_viewer_frame()
# create collapsable frame for debug visualization
self._build_debug_vis_frame()
def __del__(self):
"""Destructor for the window."""
# destroy the window
if self.ui_window is not None:
self.ui_window.visible = False
self.ui_window.destroy()
self.ui_window = None
"""
Build sub-sections of the UI.
"""
def _build_sim_frame(self):
"""Builds the sim-related controls frame for the UI."""
# create collapsable frame for controls
self.ui_window_elements["sim_frame"] = omni.ui.CollapsableFrame(
title="Simulation Settings",
width=omni.ui.Fraction(1),
height=0,
collapsed=False,
style=ui_utils.get_style(),
horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self.ui_window_elements["sim_frame"]:
# create stack for controls
self.ui_window_elements["sim_vstack"] = omni.ui.VStack(spacing=5, height=0)
with self.ui_window_elements["sim_vstack"]:
# create rendering mode dropdown
render_mode_cfg = {
"label": "Rendering Mode",
"type": "dropdown",
"default_val": self.env.sim.render_mode.value,
"items": [member.name for member in self.env.sim.RenderMode if member.value >= 0],
"tooltip": "Select a rendering mode\n" + self.env.sim.RenderMode.__doc__,
"on_clicked_fn": lambda value: self.env.sim.set_render_mode(self.env.sim.RenderMode[value]),
}
self.ui_window_elements["render_dropdown"] = ui_utils.dropdown_builder(**render_mode_cfg)
# create animation recording box
record_animate_cfg = {
"label": "Record Animation",
"type": "state_button",
"a_text": "START",
"b_text": "STOP",
"tooltip": "Record the animation of the scene. Only effective if fabric is disabled.",
"on_clicked_fn": lambda value: self._toggle_recording_animation_fn(value),
}
self.ui_window_elements["record_animation"] = ui_utils.state_btn_builder(**record_animate_cfg)
# disable the button if fabric is not enabled
self.ui_window_elements["record_animation"].enabled = not self.env.sim.is_fabric_enabled()
def _build_viewer_frame(self):
"""Build the viewer-related control frame for the UI."""
# create collapsable frame for viewer
self.ui_window_elements["viewer_frame"] = omni.ui.CollapsableFrame(
title="Viewer Settings",
width=omni.ui.Fraction(1),
height=0,
collapsed=False,
style=ui_utils.get_style(),
horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self.ui_window_elements["viewer_frame"]:
# create stack for controls
self.ui_window_elements["viewer_vstack"] = omni.ui.VStack(spacing=5, height=0)
with self.ui_window_elements["viewer_vstack"]:
# create a number slider to move to environment origin
# NOTE: slider is 1-indexed, whereas the env index is 0-indexed
viewport_origin_cfg = {
"label": "Environment Index",
"type": "button",
"default_val": self.env.cfg.viewer.env_index + 1,
"min": 1,
"max": self.env.num_envs,
"tooltip": "The environment index to follow. Only effective if follow mode is not 'World'.",
}
self.ui_window_elements["viewer_env_index"] = ui_utils.int_builder(**viewport_origin_cfg)
# register a callback for when the environment index slider changes
self.ui_window_elements["viewer_env_index"].add_value_changed_fn(self._set_viewer_env_index_fn)
# create a tracker for the camera location
viewer_follow_cfg = {
"label": "Follow Mode",
"type": "dropdown",
"default_val": 0,
"items": [name.replace("_", " ").title() for name in self._viewer_assets_options],
"tooltip": "Select the viewport camera following mode.",
"on_clicked_fn": self._set_viewer_origin_type_fn,
}
self.ui_window_elements["viewer_follow"] = ui_utils.dropdown_builder(**viewer_follow_cfg)
# add viewer default eye and lookat locations
self.ui_window_elements["viewer_eye"] = ui_utils.xyz_builder(
label="Camera Eye",
tooltip="Modify the XYZ location of the viewer eye.",
default_val=self.env.cfg.viewer.eye,
step=0.1,
on_value_changed_fn=[self._set_viewer_location_fn] * 3,
)
self.ui_window_elements["viewer_lookat"] = ui_utils.xyz_builder(
label="Camera Target",
tooltip="Modify the XYZ location of the viewer target.",
default_val=self.env.cfg.viewer.lookat,
step=0.1,
on_value_changed_fn=[self._set_viewer_location_fn] * 3,
)
def _build_debug_vis_frame(self):
"""Builds the debug visualization frame for various scene elements.
        This function queries the scene for all elements that have a debug visualization
        implemented and creates a checkbox to toggle the debug visualization for each such
        element. If an element does not have a debug visualization implemented, a label is
        created instead.
"""
# create collapsable frame for debug visualization
self.ui_window_elements["debug_frame"] = omni.ui.CollapsableFrame(
title="Scene Debug Visualization",
width=omni.ui.Fraction(1),
height=0,
collapsed=False,
style=ui_utils.get_style(),
horizontal_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=omni.ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self.ui_window_elements["debug_frame"]:
# create stack for debug visualization
self.ui_window_elements["debug_vstack"] = omni.ui.VStack(spacing=5, height=0)
with self.ui_window_elements["debug_vstack"]:
elements = [
self.env.scene.terrain,
*self.env.scene.rigid_objects.values(),
*self.env.scene.articulations.values(),
*self.env.scene.sensors.values(),
]
names = [
"terrain",
*self.env.scene.rigid_objects.keys(),
*self.env.scene.articulations.keys(),
*self.env.scene.sensors.keys(),
]
                # create a checkbox or label for each scene element
for elem, name in zip(elements, names):
if elem is not None:
self._create_debug_vis_ui_element(name, elem)
"""
Custom callbacks for UI elements.
"""
def _toggle_recording_animation_fn(self, value: bool):
"""Toggles the animation recording."""
if value:
# log directory to save the recording
            # note: the attribute is reset to None when a recording stops
            if not hasattr(self, "animation_log_dir") or self.animation_log_dir is None:
                # create a new log directory
                log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
                self.animation_log_dir = os.path.join(os.getcwd(), "recordings", log_dir)
# start the recording
_ = omni.kit.commands.execute(
"StartRecording",
target_paths=[("/World", True)],
live_mode=True,
use_frame_range=False,
start_frame=0,
end_frame=0,
use_preroll=False,
preroll_frame=0,
record_to="FILE",
fps=0,
apply_root_anim=False,
increment_name=True,
record_folder=self.animation_log_dir,
take_name="TimeSample",
)
else:
# stop the recording
_ = omni.kit.commands.execute("StopRecording")
# save the current stage
stage = omni.usd.get_context().get_stage()
source_layer = stage.GetRootLayer()
# output the stage to a file
stage_usd_path = os.path.join(self.animation_log_dir, "Stage.usd")
source_prim_path = "/"
            # create a new layer at the target path, or reuse it if it already exists
temp_layer = Sdf.Find(stage_usd_path)
if temp_layer is None:
temp_layer = Sdf.Layer.CreateNew(stage_usd_path)
temp_stage = Usd.Stage.Open(temp_layer)
# update stage data
UsdGeom.SetStageUpAxis(temp_stage, UsdGeom.GetStageUpAxis(stage))
UsdGeom.SetStageMetersPerUnit(temp_stage, UsdGeom.GetStageMetersPerUnit(stage))
# copy the prim
Sdf.CreatePrimInLayer(temp_layer, source_prim_path)
Sdf.CopySpec(source_layer, source_prim_path, temp_layer, source_prim_path)
# set the default prim
temp_layer.defaultPrim = Sdf.Path(source_prim_path).name
# remove all physics from the stage
for prim in temp_stage.TraverseAll():
# skip if the prim is an instance
if prim.IsInstanceable():
continue
                # if the prim has an articulation root API, remove it
if prim.HasAPI(UsdPhysics.ArticulationRootAPI):
prim.RemoveAPI(UsdPhysics.ArticulationRootAPI)
prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI)
                # if the prim has a rigid body API, remove it
if prim.HasAPI(UsdPhysics.RigidBodyAPI):
prim.RemoveAPI(UsdPhysics.RigidBodyAPI)
prim.RemoveAPI(PhysxSchema.PhysxRigidBodyAPI)
# if prim is a joint type then disable it
if prim.IsA(UsdPhysics.Joint):
prim.GetAttribute("physics:jointEnabled").Set(False)
# resolve all paths relative to layer path
omni.usd.resolve_paths(source_layer.identifier, temp_layer.identifier)
# save the stage
temp_layer.Save()
# print the path to the saved stage
print("Recording completed.")
print(f"\tSaved recorded stage to : {stage_usd_path}")
print(f"\tSaved recorded animation to: {os.path.join(self.animation_log_dir, 'TimeSample_tk001.usd')}")
print("\nTo play the animation, check the instructions in the following link:")
print(
"\thttps://docs.omniverse.nvidia.com/extensions/latest/ext_animation_stage-recorder.html#using-the-captured-timesamples"
)
print("\n")
# reset the log directory
self.animation_log_dir = None
def _set_viewer_origin_type_fn(self, value: str):
"""Sets the origin of the viewport's camera. This is based on the drop-down menu in the UI."""
# Extract the viewport camera controller from environment
vcc = self.env.viewport_camera_controller
if vcc is None:
raise ValueError("Viewport camera controller is not initialized! Please check the rendering mode.")
# Based on origin type, update the camera view
if value == "World":
vcc.update_view_to_world()
elif value == "Env":
vcc.update_view_to_env()
else:
# find which index the asset is
fancy_names = [name.replace("_", " ").title() for name in self._viewer_assets_options]
# store the desired env index
viewer_asset_name = self._viewer_assets_options[fancy_names.index(value)]
# update the camera view
vcc.update_view_to_asset_root(viewer_asset_name)
def _set_viewer_location_fn(self, model: omni.ui.SimpleFloatModel):
"""Sets the viewport camera location based on the UI."""
# access the viewport camera controller (for brevity)
vcc = self.env.viewport_camera_controller
if vcc is None:
raise ValueError("Viewport camera controller is not initialized! Please check the rendering mode.")
# obtain the camera locations and set them in the viewpoint camera controller
eye = [self.ui_window_elements["viewer_eye"][i].get_value_as_float() for i in range(3)]
lookat = [self.ui_window_elements["viewer_lookat"][i].get_value_as_float() for i in range(3)]
# update the camera view
vcc.update_view_location(eye, lookat)
def _set_viewer_env_index_fn(self, model: omni.ui.SimpleIntModel):
"""Sets the environment index and updates the camera if in 'env' origin mode."""
# access the viewport camera controller (for brevity)
vcc = self.env.viewport_camera_controller
if vcc is None:
raise ValueError("Viewport camera controller is not initialized! Please check the rendering mode.")
# store the desired env index, UI is 1-indexed
vcc.set_view_env_index(model.as_int - 1)
"""
Helper functions - UI building.
"""
def _create_debug_vis_ui_element(self, name: str, elem: object):
"""Create a checkbox for toggling debug visualization for the given element."""
with omni.ui.HStack():
# create the UI element
text = (
"Toggle debug visualization."
if elem.has_debug_vis_implementation
else "Debug visualization not implemented."
)
omni.ui.Label(
name.replace("_", " ").title(),
width=ui_utils.LABEL_WIDTH - 12,
alignment=omni.ui.Alignment.LEFT_CENTER,
tooltip=text,
)
self.ui_window_elements[f"{name}_cb"] = SimpleCheckBox(
model=omni.ui.SimpleBoolModel(),
enabled=elem.has_debug_vis_implementation,
checked=elem.cfg.debug_vis,
on_checked_fn=lambda value, e=weakref.proxy(elem): e.set_debug_vis(value),
)
ui_utils.add_line_rect_flourish()
async def _dock_window(self, window_title: str):
"""Docks the custom UI window to the property window."""
# wait for the window to be created
for _ in range(5):
if omni.ui.Workspace.get_window(window_title):
break
await self.env.sim.app.next_update_async()
# dock next to properties window
custom_window = omni.ui.Workspace.get_window(window_title)
property_window = omni.ui.Workspace.get_window("Property")
if custom_window and property_window:
custom_window.dock_in(property_window, omni.ui.DockPosition.SAME, 1.0)
custom_window.focus()
| 18,520 | Python | 45.535176 | 136 | 0.582397 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module with implementation of manager terms.
The functions can be provided to different managers that are responsible for the
different aspects of the MDP. These include the observation, reward, termination,
actions, events and curriculum managers.
The terms are defined under the ``envs`` module because they are used to define
the environment. However, they are not part of the environment directly, but
are used to define the environment through their managers.
"""
from .actions import * # noqa: F401, F403
from .commands import * # noqa: F401, F403
from .curriculums import * # noqa: F401, F403
from .events import * # noqa: F401, F403
from .observations import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
from .terminations import * # noqa: F401, F403
| 918 | Python | 35.759999 | 81 | 0.752723 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/curriculums.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to create curriculum for the learning environment.
The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
def modify_reward_weight(env: RLTaskEnv, env_ids: Sequence[int], term_name: str, weight: float, num_steps: int):
"""Curriculum that modifies a reward weight a given number of steps.
Args:
env: The learning environment.
env_ids: Not used since all environments are affected.
term_name: The name of the reward term.
weight: The weight of the reward term.
num_steps: The number of steps after which the change should be applied.
"""
if env.common_step_counter > num_steps:
# obtain term settings
term_cfg = env.reward_manager.get_term_cfg(term_name)
# update term settings
term_cfg.weight = weight
env.reward_manager.set_term_cfg(term_name, term_cfg)
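# Illustrative usage sketch (not part of this module): attaching the term above in a
# task's curriculum configuration. The reward-term name "track_lin_vel_xy_exp" and the
# step count are hypothetical placeholders.
#
# from omni.isaac.orbit.managers import CurriculumTermCfg
#
# increase_tracking_weight = CurriculumTermCfg(
#     func=modify_reward_weight,
#     params={"term_name": "track_lin_vel_xy_exp", "weight": 2.0, "num_steps": 10_000},
# )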
| 1,285 | Python | 33.756756 | 112 | 0.713619 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/rewards.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to enable reward functions.
The functions can be passed to the :class:`omni.isaac.orbit.managers.RewardTermCfg` object to include
the reward introduced by the function.
"""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation, RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers.manager_base import ManagerTermBase
from omni.isaac.orbit.managers.manager_term_cfg import RewardTermCfg
from omni.isaac.orbit.sensors import ContactSensor
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
"""
General.
"""
def is_alive(env: RLTaskEnv) -> torch.Tensor:
"""Reward for being alive."""
return (~env.termination_manager.terminated).float()
def is_terminated(env: RLTaskEnv) -> torch.Tensor:
"""Penalize terminated episodes that don't correspond to episodic timeouts."""
return env.termination_manager.terminated.float()
class is_terminated_term(ManagerTermBase):
"""Penalize termination for specific terms that don't correspond to episodic timeouts.
The parameters are as follows:
    * :attr:`term_keys`: The termination terms to penalize. This can be a string, a list of strings
or regular expressions. Default is ".*" which penalizes all terminations.
The reward is computed as the sum of the termination terms that are not episodic timeouts.
    This means that the reward is 0 if the episode is terminated due to an episodic timeout;
    otherwise, it equals the number of active termination terms (e.g., 2 if two terms are active).
"""
def __init__(self, cfg: RewardTermCfg, env: RLTaskEnv):
# initialize the base class
super().__init__(cfg, env)
# find and store the termination terms
term_keys = cfg.params.get("term_keys", ".*")
self._term_names = env.termination_manager.find_terms(term_keys)
def __call__(self, env: RLTaskEnv, term_keys: str | list[str] = ".*") -> torch.Tensor:
# Return the unweighted reward for the termination terms
reset_buf = torch.zeros(env.num_envs, device=env.device)
for term in self._term_names:
# Sums over terminations term values to account for multiple terminations in the same step
reset_buf += env.termination_manager.get_term(term)
return (reset_buf * (~env.termination_manager.time_outs)).float()
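# Illustrative usage sketch (not part of this module): penalizing one specific
# termination through the reward manager. The termination-term key "base_contact"
# is a hypothetical placeholder.
#
# from omni.isaac.orbit.managers import RewardTermCfg
#
# termination_penalty = RewardTermCfg(
#     func=is_terminated_term,
#     weight=-1.0,
#     params={"term_keys": ["base_contact"]},
# )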
"""
Root penalties.
"""
def lin_vel_z_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize z-axis base linear velocity using L2-kernel."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return torch.square(asset.data.root_lin_vel_b[:, 2])
def ang_vel_xy_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize xy-axis base angular velocity using L2-kernel."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return torch.sum(torch.square(asset.data.root_ang_vel_b[:, :2]), dim=1)
def flat_orientation_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize non-flat base orientation using L2-kernel.
This is computed by penalizing the xy-components of the projected gravity vector.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return torch.sum(torch.square(asset.data.projected_gravity_b[:, :2]), dim=1)
def base_height_l2(
env: RLTaskEnv, target_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Penalize asset height from its target using L2-kernel.
Note:
Currently, it assumes a flat terrain, i.e. the target height is in the world frame.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
# TODO: Fix this for rough-terrain.
return torch.square(asset.data.root_pos_w[:, 2] - target_height)
def body_lin_acc_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize the linear acceleration of bodies using L2-kernel."""
asset: Articulation = env.scene[asset_cfg.name]
return torch.sum(torch.norm(asset.data.body_lin_acc_w[:, asset_cfg.body_ids, :], dim=-1), dim=1)
"""
Joint penalties.
"""
def joint_torques_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize joint torques applied on the articulation using L2-kernel.
NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint torques contribute to the L2 norm.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return torch.sum(torch.square(asset.data.applied_torque[:, asset_cfg.joint_ids]), dim=1)
def joint_vel_l1(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Penalize joint velocities on the articulation using an L1-kernel."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return torch.sum(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1)
def joint_vel_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize joint velocities on the articulation using L1-kernel.
NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint velocities contribute to the L1 norm.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return torch.sum(torch.square(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1)
def joint_acc_l2(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize joint accelerations on the articulation using L2-kernel.
NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint accelerations contribute to the L2 norm.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return torch.sum(torch.square(asset.data.joint_acc[:, asset_cfg.joint_ids]), dim=1)
def joint_deviation_l1(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize joint positions that deviate from the default one."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute out of limits constraints
angle = asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids]
return torch.sum(torch.abs(angle), dim=1)
def joint_pos_limits(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize joint positions if they cross the soft limits.
This is computed as a sum of the absolute value of the difference between the joint position and the soft limits.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute out of limits constraints
out_of_limits = -(
asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0]
).clip(max=0.0)
out_of_limits += (
asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1]
).clip(min=0.0)
return torch.sum(out_of_limits, dim=1)
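# Worked example (illustrative): with soft limits [-1.0, 1.0] and joint_pos = 1.2,
#   lower-limit residual: -((1.2 - (-1.0)).clip(max=0.0)) = 0.0
#   upper-limit residual:  (1.2 - 1.0).clip(min=0.0)      = 0.2
# so this joint contributes 0.2 to the summed penalty.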
def joint_vel_limits(
env: RLTaskEnv, soft_ratio: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Penalize joint velocities if they cross the soft limits.
This is computed as a sum of the absolute value of the difference between the joint velocity and the soft limits.
Args:
soft_ratio: The ratio of the soft limits to be used.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute out of limits constraints
out_of_limits = (
torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids])
- asset.data.soft_joint_vel_limits[:, asset_cfg.joint_ids] * soft_ratio
)
# clip to max error = 1 rad/s per joint to avoid huge penalties
out_of_limits = out_of_limits.clip_(min=0.0, max=1.0)
return torch.sum(out_of_limits, dim=1)
"""
Action penalties.
"""
def applied_torque_limits(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Penalize applied torques if they cross the limits.
This is computed as a sum of the absolute value of the difference between the applied torques and the limits.
.. caution::
Currently, this only works for explicit actuators since we manually compute the applied torques.
For implicit actuators, we currently cannot retrieve the applied torques from the physics engine.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute out of limits constraints
# TODO: We need to fix this to support implicit joints.
out_of_limits = torch.abs(
asset.data.applied_torque[:, asset_cfg.joint_ids] - asset.data.computed_torque[:, asset_cfg.joint_ids]
)
return torch.sum(out_of_limits, dim=1)
def action_rate_l2(env: RLTaskEnv) -> torch.Tensor:
"""Penalize the rate of change of the actions using L2-kernel."""
return torch.sum(torch.square(env.action_manager.action - env.action_manager.prev_action), dim=1)
def action_l2(env: RLTaskEnv) -> torch.Tensor:
"""Penalize the actions using L2-kernel."""
return torch.sum(torch.square(env.action_manager.action), dim=1)
"""
Contact sensor.
"""
def undesired_contacts(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Penalize undesired contacts as the number of violations that are above a threshold."""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
# check if contact force is above threshold
net_contact_forces = contact_sensor.data.net_forces_w_history
is_contact = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold
# sum over contacts for each environment
return torch.sum(is_contact, dim=1)
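# Illustrative usage sketch (not part of this module): penalizing thigh contacts
# reported by a contact sensor. The sensor name "contact_forces" and the body-name
# pattern are hypothetical placeholders.
#
# from omni.isaac.orbit.managers import RewardTermCfg
#
# undesired_thigh_contacts = RewardTermCfg(
#     func=undesired_contacts,
#     weight=-1.0,
#     params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
# )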
def contact_forces(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Penalize contact forces as the amount of violations of the net contact force."""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
net_contact_forces = contact_sensor.data.net_forces_w_history
# compute the violation
violation = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] - threshold
# compute the penalty
return torch.sum(violation.clip(min=0.0), dim=1)
"""
Velocity-tracking rewards.
"""
def track_lin_vel_xy_exp(
env: RLTaskEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Reward tracking of linear velocity commands (xy axes) using exponential kernel."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
# compute the error
lin_vel_error = torch.sum(
torch.square(env.command_manager.get_command(command_name)[:, :2] - asset.data.root_lin_vel_b[:, :2]),
dim=1,
)
return torch.exp(-lin_vel_error / std**2)
def track_ang_vel_z_exp(
env: RLTaskEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Reward tracking of angular velocity commands (yaw) using exponential kernel."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
# compute the error
ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_b[:, 2])
return torch.exp(-ang_vel_error / std**2)
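# Both tracking terms use the exponential kernel r = exp(-||error||^2 / std^2), which
# saturates at 1 for perfect tracking and decays smoothly with the squared error.
# Illustrative usage sketch (not part of this module); the command name "base_velocity"
# and the std value are hypothetical placeholders.
#
# from omni.isaac.orbit.managers import RewardTermCfg
#
# track_lin_vel = RewardTermCfg(
#     func=track_lin_vel_xy_exp,
#     weight=1.0,
#     params={"command_name": "base_velocity", "std": 0.25},
# )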
| 12,477 | Python | 40.732441 | 130 | 0.70666 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/events.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to enable different events.
Events include anything related to altering the simulation state. This includes changing the physics
materials, applying external forces, and resetting the state of the asset.
The functions can be passed to the :class:`omni.isaac.orbit.managers.EventTermCfg` object to enable
the event introduced by the function.
"""
from __future__ import annotations
import torch
import warnings
from typing import TYPE_CHECKING, Literal
import carb
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.actuators import ImplicitActuator
from omni.isaac.orbit.assets import Articulation, RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.terrains import TerrainImporter
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
def randomize_rigid_body_material(
env: BaseEnv,
env_ids: torch.Tensor | None,
static_friction_range: tuple[float, float],
dynamic_friction_range: tuple[float, float],
restitution_range: tuple[float, float],
num_buckets: int,
asset_cfg: SceneEntityCfg,
):
"""Randomize the physics materials on all geometries of the asset.
This function creates a set of physics materials with random static friction, dynamic friction, and restitution
values. The number of materials is specified by ``num_buckets``. The materials are generated by sampling
uniform random values from the given ranges.
The material properties are then assigned to the geometries of the asset. The assignment is done by
creating a random integer tensor of shape (num_instances, max_num_shapes) where ``num_instances``
is the number of assets spawned and ``max_num_shapes`` is the maximum number of shapes in the asset (over
all bodies). The integer values are used as indices to select the material properties from the
material buckets.
.. attention::
This function uses CPU tensors to assign the material properties. It is recommended to use this function
only during the initialization of the environment. Otherwise, it may lead to a significant performance
overhead.
.. note::
PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this
limit, the simulation will crash.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device="cpu")
# sample material properties from the given ranges
material_buckets = torch.zeros(num_buckets, 3)
material_buckets[:, 0].uniform_(*static_friction_range)
material_buckets[:, 1].uniform_(*dynamic_friction_range)
material_buckets[:, 2].uniform_(*restitution_range)
# create random material assignments based on the total number of shapes: num_assets x num_shapes
# note: not optimal since it creates assignments for all the shapes but only a subset is used in the body indices case.
material_ids = torch.randint(0, num_buckets, (len(env_ids), asset.root_physx_view.max_shapes))
if asset_cfg.body_ids == slice(None) or isinstance(asset, RigidObject):
# get the current materials of the bodies
materials = asset.root_physx_view.get_material_properties()
# assign the new materials
# material ids are of shape: num_env_ids x num_shapes
# material_buckets are of shape: num_buckets x 3
materials[env_ids] = material_buckets[material_ids]
# set the material properties into the physics simulation
asset.root_physx_view.set_material_properties(materials, env_ids)
elif isinstance(asset, Articulation):
# obtain number of shapes per body (needed for indexing the material properties correctly)
# note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes
# per body. We use the physics simulation view to obtain the number of shapes per body.
num_shapes_per_body = []
for link_path in asset.root_physx_view.link_paths[0]:
link_physx_view = asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore
num_shapes_per_body.append(link_physx_view.max_shapes)
# get the current materials of the bodies
materials = asset.root_physx_view.get_material_properties()
# sample material properties from the given ranges
for body_id in asset_cfg.body_ids:
# start index of shape
start_idx = sum(num_shapes_per_body[:body_id])
# end index of shape
end_idx = start_idx + num_shapes_per_body[body_id]
# assign the new materials
# material ids are of shape: num_env_ids x num_shapes
# material_buckets are of shape: num_buckets x 3
materials[env_ids, start_idx:end_idx] = material_buckets[material_ids[:, start_idx:end_idx]]
# set the material properties into the physics simulation
asset.root_physx_view.set_material_properties(materials, env_ids)
else:
raise ValueError(
f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{asset_cfg.name}'"
f" with type: '{type(asset)}' and body_ids: '{asset_cfg.body_ids}'."
)
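# Illustrative usage sketch (not part of this module): randomizing all rigid-body
# materials of the robot once at startup. The ranges and bucket count are
# hypothetical placeholders.
#
# from omni.isaac.orbit.managers import EventTermCfg
#
# physics_material = EventTermCfg(
#     func=randomize_rigid_body_material,
#     mode="startup",
#     params={
#         "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
#         "static_friction_range": (0.6, 1.0),
#         "dynamic_friction_range": (0.4, 0.8),
#         "restitution_range": (0.0, 0.2),
#         "num_buckets": 64,
#     },
# )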
def add_body_mass(
env: BaseEnv, env_ids: torch.Tensor | None, mass_range: tuple[float, float], asset_cfg: SceneEntityCfg
):
"""Randomize the mass of the bodies by adding a random value sampled from the given range.
.. tip::
This function uses CPU tensors to assign the body masses. It is recommended to use this function
only during the initialization of the environment.
.. deprecated:: v0.4
This function is deprecated. Please use :func:`randomize_rigid_body_mass` with ``operation="add"`` instead.
"""
msg = "Event term 'add_body_mass' is deprecated. Please use 'randomize_rigid_body_mass' with operation='add'."
warnings.warn(msg, DeprecationWarning)
carb.log_warn(msg)
# call the new function
randomize_rigid_body_mass(env, env_ids, asset_cfg, mass_range, operation="add", distribution="uniform")
def randomize_rigid_body_mass(
env: BaseEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
mass_range: tuple[float, float],
operation: Literal["add", "scale", "abs"],
distribution: Literal["uniform", "log_uniform"] = "uniform",
):
"""Randomize the mass of the bodies by adding, scaling, or setting random values.
This function allows randomizing the mass of the bodies of the asset. The function samples random values from the
given ranges and adds, scales, or sets the values into the physics simulation based on the operation.
.. tip::
This function uses CPU tensors to assign the body masses. It is recommended to use this function
only during the initialization of the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# resolve body indices
if asset_cfg.body_ids == slice(None):
body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu")
else:
body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu")
# get the current masses of the bodies (num_assets, num_bodies)
masses = asset.root_physx_view.get_masses()
# sample from the given range
# note: we modify the masses in-place for all environments
# however, the setter takes care that only the masses of the specified environments are modified
masses = _randomize_prop_by_op(
masses, mass_range, env_ids, body_ids, operation=operation, distribution=distribution
)
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device="cpu")
# set the mass into the physics simulation
asset.root_physx_view.set_masses(masses, env_ids)
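# Illustrative usage sketch (not part of this module): scaling the base mass by a
# log-uniform factor at startup. The body name "base" and the range are hypothetical
# placeholders.
#
# from omni.isaac.orbit.managers import EventTermCfg
#
# randomize_base_mass = EventTermCfg(
#     func=randomize_rigid_body_mass,
#     mode="startup",
#     params={
#         "asset_cfg": SceneEntityCfg("robot", body_names="base"),
#         "mass_range": (0.8, 1.2),
#         "operation": "scale",
#         "distribution": "log_uniform",
#     },
# )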
def randomize_actuator_gains(
env: BaseEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
stiffness_range: tuple[float, float] | None = None,
damping_range: tuple[float, float] | None = None,
operation: Literal["add", "scale", "abs"] = "abs",
distribution: Literal["uniform", "log_uniform"] = "uniform",
):
"""Randomize the actuator gains in an articulation by adding, scaling, or setting random values.
This function allows randomizing the actuator stiffness and damping gains.
The function samples random values from the given ranges and applies the operation to the joint properties.
It then sets the values into the actuator models. If the ranges are not provided for a particular property,
the function does not modify the property.
.. tip::
For implicit actuators, this function uses CPU tensors to assign the actuator gains into the simulation.
In such cases, it is recommended to use this function only during the initialization of the environment.
Raises:
NotImplementedError: If the joint indices are in explicit motor mode. This operation is currently
not supported for explicit actuator models.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# resolve joint indices
if asset_cfg.joint_ids == slice(None):
joint_ids_list = range(asset.num_joints)
joint_ids = slice(None) # for optimization purposes
else:
joint_ids_list = asset_cfg.joint_ids
joint_ids = torch.tensor(asset_cfg.joint_ids, dtype=torch.int, device=asset.device)
# check if none of the joint indices are in explicit motor mode
for joint_index in joint_ids_list:
for act_name, actuator in asset.actuators.items():
# if joint indices are a slice (i.e., all joints are captured) or the joint index is in the actuator
if actuator.joint_indices == slice(None) or joint_index in actuator.joint_indices:
if not isinstance(actuator, ImplicitActuator):
raise NotImplementedError(
"Event term 'randomize_actuator_stiffness_and_damping' is performed on asset"
f" '{asset_cfg.name}' on the joint '{asset.joint_names[joint_index]}' ('{joint_index}') which"
f" uses an explicit actuator model '{act_name}<{actuator.__class__.__name__}>'. This operation"
" is currently not supported for explicit actuator models."
)
# sample joint properties from the given ranges and set into the physics simulation
# -- stiffness
if stiffness_range is not None:
stiffness = asset.root_physx_view.get_dof_stiffnesses().to(asset.device)
stiffness = _randomize_prop_by_op(
stiffness, stiffness_range, env_ids, joint_ids, operation=operation, distribution=distribution
)
asset.write_joint_stiffness_to_sim(stiffness, joint_ids=joint_ids, env_ids=env_ids)
# -- damping
if damping_range is not None:
damping = asset.root_physx_view.get_dof_dampings().to(asset.device)
damping = _randomize_prop_by_op(
damping, damping_range, env_ids, joint_ids, operation=operation, distribution=distribution
)
asset.write_joint_damping_to_sim(damping, joint_ids=joint_ids, env_ids=env_ids)
def randomize_joint_parameters(
env: BaseEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
friction_range: tuple[float, float] | None = None,
armature_range: tuple[float, float] | None = None,
operation: Literal["add", "scale", "abs"] = "abs",
distribution: Literal["uniform", "log_uniform"] = "uniform",
):
"""Randomize the joint parameters of an articulation by adding, scaling, or setting random values.
This function allows randomizing the joint parameters (friction and armature) of the asset. These correspond
to the physics engine joint properties that affect the joint behavior.
The function samples random values from the given ranges and applies the operation to the joint properties.
It then sets the values into the physics simulation. If the ranges are not provided for a
particular property, the function does not modify the property.
.. tip::
This function uses CPU tensors to assign the joint properties. It is recommended to use this function
only during the initialization of the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# resolve joint indices
if asset_cfg.joint_ids == slice(None):
joint_ids = slice(None) # for optimization purposes
else:
joint_ids = torch.tensor(asset_cfg.joint_ids, dtype=torch.int, device=asset.device)
# sample joint properties from the given ranges and set into the physics simulation
# -- friction
if friction_range is not None:
friction = asset.root_physx_view.get_dof_friction_coefficients().to(asset.device)
friction = _randomize_prop_by_op(
friction, friction_range, env_ids, joint_ids, operation=operation, distribution=distribution
)
asset.write_joint_friction_to_sim(friction, joint_ids=joint_ids, env_ids=env_ids)
# -- armature
if armature_range is not None:
armature = asset.root_physx_view.get_dof_armatures().to(asset.device)
armature = _randomize_prop_by_op(
armature, armature_range, env_ids, joint_ids, operation=operation, distribution=distribution
)
asset.write_joint_armature_to_sim(armature, joint_ids=joint_ids, env_ids=env_ids)
def apply_external_force_torque(
env: BaseEnv,
env_ids: torch.Tensor,
force_range: tuple[float, float],
torque_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Randomize the external forces and torques applied to the bodies.
This function creates a set of random forces and torques sampled from the given ranges. The number of forces
and torques is equal to the number of bodies times the number of environments. The forces and torques are
applied to the bodies by calling ``asset.set_external_force_and_torque``. The forces and torques are only
applied when ``asset.write_data_to_sim()`` is called in the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=asset.device)
# resolve number of bodies
num_bodies = len(asset_cfg.body_ids) if isinstance(asset_cfg.body_ids, list) else asset.num_bodies
# sample random forces and torques
size = (len(env_ids), num_bodies, 3)
forces = math_utils.sample_uniform(*force_range, size, asset.device)
torques = math_utils.sample_uniform(*torque_range, size, asset.device)
# set the forces and torques into the buffers
# note: these are only applied when you call: `asset.write_data_to_sim()`
asset.set_external_force_and_torque(forces, torques, env_ids=env_ids, body_ids=asset_cfg.body_ids)
def push_by_setting_velocity(
env: BaseEnv,
env_ids: torch.Tensor,
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Push the asset by setting the root velocity to a random value within the given ranges.
This creates an effect similar to pushing the asset with a random impulse that changes the asset's velocity.
It samples the root velocity from the given ranges and sets the velocity into the physics simulation.
The function takes a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``.
If the dictionary does not contain a key, the velocity is set to zero for that axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# velocities
vel_w = asset.data.root_vel_w[env_ids]
# sample random velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
vel_w[:] = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], vel_w.shape, device=asset.device)
# set the velocities into the physics simulation
asset.write_root_velocity_to_sim(vel_w, env_ids=env_ids)
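# Illustrative usage sketch (not part of this module): pushing the robot at random
# intervals by overwriting its planar base velocity. Omitted keys (z, roll, pitch, yaw)
# default to zero velocity; the interval and ranges are hypothetical placeholders.
#
# from omni.isaac.orbit.managers import EventTermCfg
#
# push_robot = EventTermCfg(
#     func=push_by_setting_velocity,
#     mode="interval",
#     interval_range_s=(10.0, 15.0),
#     params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}},
# )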
def reset_root_state_uniform(
env: BaseEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root state to a random position and velocity uniformly within the given ranges.
This function randomizes the root position and velocity of the asset.
* It samples the root position from the given ranges and adds them to the default root position, before setting
them into the physics simulation.
* It samples the root orientation from the given ranges and sets them into the physics simulation.
* It samples the root velocity from the given ranges and sets them into the physics simulation.
The function takes a dictionary of pose and velocity ranges for each axis and rotation. The keys of the
dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form
``(min, max)``. If the dictionary does not contain a key, the position or velocity is set to zero for that axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# get default root state
root_states = asset.data.default_root_state[env_ids].clone()
# poses
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3]
orientations = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5])
# velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
velocities = root_states[:, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
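# Illustrative usage sketch (not part of this module): resetting the root with a
# +/-0.5 m planar offset and a random yaw on every episode reset; an empty
# velocity_range leaves the default root velocity unchanged. Values are hypothetical
# placeholders.
#
# from omni.isaac.orbit.managers import EventTermCfg
#
# reset_base = EventTermCfg(
#     func=reset_root_state_uniform,
#     mode="reset",
#     params={
#         "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
#         "velocity_range": {},
#     },
# )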
def reset_root_state_with_random_orientation(
env: BaseEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root position and velocities sampled randomly within the given ranges
and the asset root orientation sampled randomly from the SO(3).
This function randomizes the root position and velocity of the asset.
* It samples the root position from the given ranges and adds them to the default root position, before setting
them into the physics simulation.
* It samples the root orientation uniformly from the SO(3) and sets them into the physics simulation.
* It samples the root velocity from the given ranges and sets them into the physics simulation.
The function takes a dictionary of position and velocity ranges for each axis and rotation:
* :attr:`pose_range` - a dictionary of position ranges for each axis. The keys of the dictionary are ``x``,
``y``, and ``z``. The orientation is sampled uniformly from the SO(3).
* :attr:`velocity_range` - a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``.
The values are tuples of the form ``(min, max)``. If the dictionary does not contain a particular key,
the position is set to zero for that axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# get default root state
root_states = asset.data.default_root_state[env_ids].clone()
# poses
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device)
positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples
orientations = math_utils.random_orientation(len(env_ids), device=asset.device)
# velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
velocities = root_states[:, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
def reset_root_state_from_terrain(
env: BaseEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root state by sampling a random valid pose from the terrain.
    This function samples a random valid pose (based on flat patches) from the terrain and sets the root state
of the asset to this position. The function also samples random velocities from the given ranges and sets them
into the physics simulation.
The function takes a dictionary of position and velocity ranges for each axis and rotation:
* :attr:`pose_range` - a dictionary of pose ranges for each axis. The keys of the dictionary are ``roll``,
``pitch``, and ``yaw``. The position is sampled from the flat patches of the terrain.
* :attr:`velocity_range` - a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``.
The values are tuples of the form ``(min, max)``. If the dictionary does not contain a particular key,
the position is set to zero for that axis.
Note:
The function expects the terrain to have valid flat patches under the key "init_pos". The flat patches
are used to sample the random pose for the robot.
Raises:
ValueError: If the terrain does not have valid flat patches under the key "init_pos".
"""
# access the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
terrain: TerrainImporter = env.scene.terrain
# obtain all flat patches corresponding to the valid poses
valid_positions: torch.Tensor = terrain.flat_patches.get("init_pos")
if valid_positions is None:
raise ValueError(
"The event term 'reset_root_state_from_terrain' requires valid flat patches under 'init_pos'."
f" Found: {list(terrain.flat_patches.keys())}"
)
# sample random valid poses
ids = torch.randint(0, valid_positions.shape[2], size=(len(env_ids),), device=env.device)
positions = valid_positions[terrain.terrain_levels[env_ids], terrain.terrain_types[env_ids], ids]
positions += asset.data.default_root_state[env_ids, :3]
# sample random orientations
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device)
# convert to quaternions
orientations = math_utils.quat_from_euler_xyz(rand_samples[:, 0], rand_samples[:, 1], rand_samples[:, 2])
# sample random velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
    velocities = asset.data.default_root_state[env_ids, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
def reset_joints_by_scale(
env: BaseEnv,
env_ids: torch.Tensor,
position_range: tuple[float, float],
velocity_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the robot joints by scaling the default position and velocity by the given ranges.
This function samples random values from the given ranges and scales the default joint positions and velocities
by these values. The scaled values are then set into the physics simulation.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# get default joint state
joint_pos = asset.data.default_joint_pos[env_ids].clone()
joint_vel = asset.data.default_joint_vel[env_ids].clone()
# scale these values randomly
joint_pos *= math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device)
joint_vel *= math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device)
# clamp joint pos to limits
joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids]
joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1])
# clamp joint vel to limits
joint_vel_limits = asset.data.soft_joint_vel_limits[env_ids]
joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits)
# set into the physics simulation
asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
def reset_joints_by_offset(
env: BaseEnv,
env_ids: torch.Tensor,
position_range: tuple[float, float],
velocity_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the robot joints with offsets around the default position and velocity by the given ranges.
This function samples random values from the given ranges and biases the default joint positions and velocities
by these values. The biased values are then set into the physics simulation.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# get default joint state
joint_pos = asset.data.default_joint_pos[env_ids].clone()
joint_vel = asset.data.default_joint_vel[env_ids].clone()
# bias these values randomly
joint_pos += math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device)
joint_vel += math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device)
# clamp joint pos to limits
joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids]
joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1])
# clamp joint vel to limits
joint_vel_limits = asset.data.soft_joint_vel_limits[env_ids]
joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits)
# set into the physics simulation
asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
def reset_scene_to_default(env: BaseEnv, env_ids: torch.Tensor):
"""Reset the scene to the default state specified in the scene configuration."""
# rigid bodies
for rigid_object in env.scene.rigid_objects.values():
# obtain default and deal with the offset for env origins
default_root_state = rigid_object.data.default_root_state[env_ids].clone()
default_root_state[:, 0:3] += env.scene.env_origins[env_ids]
# set into the physics simulation
rigid_object.write_root_state_to_sim(default_root_state, env_ids=env_ids)
# articulations
for articulation_asset in env.scene.articulations.values():
# obtain default and deal with the offset for env origins
default_root_state = articulation_asset.data.default_root_state[env_ids].clone()
default_root_state[:, 0:3] += env.scene.env_origins[env_ids]
# set into the physics simulation
articulation_asset.write_root_state_to_sim(default_root_state, env_ids=env_ids)
# obtain default joint positions
default_joint_pos = articulation_asset.data.default_joint_pos[env_ids].clone()
default_joint_vel = articulation_asset.data.default_joint_vel[env_ids].clone()
# set into the physics simulation
articulation_asset.write_joint_state_to_sim(default_joint_pos, default_joint_vel, env_ids=env_ids)
"""
Internal helper functions.
"""
def _randomize_prop_by_op(
data: torch.Tensor,
sample_range: tuple[float, float],
dim_0_ids: torch.Tensor | None,
dim_1_ids: torch.Tensor | slice,
operation: Literal["add", "scale", "abs"],
distribution: Literal["uniform", "log_uniform"],
) -> torch.Tensor:
"""Perform data randomization based on the given operation and distribution.
Args:
data: The data tensor to be randomized. Shape is (dim_0, dim_1).
sample_range: The range to sample the random values from.
dim_0_ids: The indices of the first dimension to randomize.
dim_1_ids: The indices of the second dimension to randomize.
operation: The operation to perform on the data. Options: 'add', 'scale', 'abs'.
distribution: The distribution to sample the random values from. Options: 'uniform', 'log_uniform'.
Returns:
The data tensor after randomization. Shape is (dim_0, dim_1).
Raises:
NotImplementedError: If the operation or distribution is not supported.
"""
    # resolve shape
    # -- dim 0
    if dim_0_ids is None:
        n_dim_0 = data.shape[0]
        dim_0_ids = slice(None)
    else:
        n_dim_0 = len(dim_0_ids)
        # reshape for outer-product style advanced indexing when both dimensions use tensors
        if not isinstance(dim_1_ids, slice):
            dim_0_ids = dim_0_ids[:, None]
    # -- dim 1
    if isinstance(dim_1_ids, slice):
        n_dim_1 = data.shape[1]
    else:
        n_dim_1 = len(dim_1_ids)
# resolve the distribution
if distribution == "uniform":
dist_fn = math_utils.sample_uniform
elif distribution == "log_uniform":
dist_fn = math_utils.sample_log_uniform
else:
raise NotImplementedError(
f"Unknown distribution: '{distribution}' for joint properties randomization."
" Please use 'uniform' or 'log_uniform'."
)
# perform the operation
if operation == "add":
data[dim_0_ids, dim_1_ids] += dist_fn(*sample_range, (n_dim_0, n_dim_1), device=data.device)
elif operation == "scale":
data[dim_0_ids, dim_1_ids] *= dist_fn(*sample_range, (n_dim_0, n_dim_1), device=data.device)
elif operation == "abs":
data[dim_0_ids, dim_1_ids] = dist_fn(*sample_range, (n_dim_0, n_dim_1), device=data.device)
else:
raise NotImplementedError(
f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'scale', or 'abs'."
)
return data
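# Illustrative sanity check (not part of this module): scaling every entry of a
# (4, 3) tensor by an independent uniform factor in [0.9, 1.1].
#
# data = torch.ones(4, 3)
# data = _randomize_prop_by_op(
#     data, (0.9, 1.1), dim_0_ids=None, dim_1_ids=slice(None),
#     operation="scale", distribution="uniform",
# )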
| 32,101 | Python | 46.418021 | 123 | 0.688172 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/terminations.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to activate certain terminations.
The functions can be passed to the :class:`omni.isaac.orbit.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation, RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import ContactSensor
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
from omni.isaac.orbit.managers.command_manager import CommandTerm
"""
MDP terminations.
"""
def time_out(env: RLTaskEnv) -> torch.Tensor:
"""Terminate the episode when the episode length exceeds the maximum episode length."""
return env.episode_length_buf >= env.max_episode_length
def command_resample(env: RLTaskEnv, command_name: str, num_resamples: int = 1) -> torch.Tensor:
"""Terminate the episode based on the total number of times commands have been re-sampled.
This makes the maximum episode length fluid in nature as it depends on how the commands are
sampled. It is useful in situations where delayed rewards are used :cite:`rudin2022advanced`.
"""
command: CommandTerm = env.command_manager.get_term(command_name)
return torch.logical_and((command.time_left <= env.step_dt), (command.command_counter == num_resamples))
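# Illustrative usage sketch (not part of this module): registering the terms above
# with the termination manager. The command name "base_velocity" is a hypothetical
# placeholder.
#
# from omni.isaac.orbit.managers import TerminationTermCfg
#
# episode_timeout = TerminationTermCfg(func=time_out, time_out=True)
# resample_limit = TerminationTermCfg(
#     func=command_resample, params={"command_name": "base_velocity", "num_resamples": 1}
# )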
"""
Root terminations.
"""
def bad_orientation(
env: RLTaskEnv, limit_angle: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's orientation is too far from the desired orientation limits.
This is computed by checking the angle between the projected gravity vector and the z-axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return torch.acos(-asset.data.projected_gravity_b[:, 2]).abs() > limit_angle
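# Worked example (illustrative): for an upright base the projected gravity is roughly
# (0, 0, -1), so acos(1.0) = 0 rad. Tilting the base by 60 degrees gives
# acos(0.5) ~= 1.05 rad, which would exceed a limit_angle of 1.0.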
def root_height_below_minimum(
env: RLTaskEnv, minimum_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's root height is below the minimum height.
Note:
This is currently only supported for flat terrains, i.e. the minimum height is in the world frame.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_pos_w[:, 2] < minimum_height
"""
Joint terminations.
"""
def joint_pos_out_of_limit(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Terminate when the asset's joint positions are outside of the soft joint limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
    # compute any violations (select the configured joints before reducing over them)
    out_of_upper_limits = torch.any(
        asset.data.joint_pos[:, asset_cfg.joint_ids] > asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1], dim=1
    )
    out_of_lower_limits = torch.any(
        asset.data.joint_pos[:, asset_cfg.joint_ids] < asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0], dim=1
    )
    return torch.logical_or(out_of_upper_limits, out_of_lower_limits)
def joint_pos_out_of_manual_limit(
env: RLTaskEnv, bounds: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's joint positions are outside of the configured bounds.
Note:
This function is similar to :func:`joint_pos_out_of_limit` but allows the user to specify the bounds manually.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
if asset_cfg.joint_ids is None:
asset_cfg.joint_ids = slice(None)
# compute any violations
out_of_upper_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] > bounds[1], dim=1)
out_of_lower_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] < bounds[0], dim=1)
return torch.logical_or(out_of_upper_limits, out_of_lower_limits)
def joint_vel_out_of_limit(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Terminate when the asset's joint velocities are outside of the soft joint limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute any violations
limits = asset.data.soft_joint_vel_limits
return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > limits[:, asset_cfg.joint_ids], dim=1)
def joint_vel_out_of_manual_limit(
env: RLTaskEnv, max_velocity: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's joint velocities are outside the provided limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute any violations
return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > max_velocity, dim=1)
def joint_effort_out_of_limit(env: RLTaskEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Terminate when effort applied on the asset's joints are outside of the soft joint limits.
In the actuators, the applied torque are the efforts applied on the joints. These are computed by clipping
the computed torques to the joint limits. Hence, we check if the computed torques are equal to the applied
torques.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# check if any joint effort is out of limit
out_of_limits = torch.isclose(
asset.data.computed_torque[:, asset_cfg.joint_ids], asset.data.applied_torque[:, asset_cfg.joint_ids]
)
return torch.any(out_of_limits, dim=1)
"""
Contact sensor.
"""
def illegal_contact(env: RLTaskEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Terminate when the contact force on the sensor exceeds the force threshold."""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
net_contact_forces = contact_sensor.data.net_forces_w_history
# check if any contact force exceeds the threshold
return torch.any(
torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold, dim=1
)
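# Example wiring (illustrative sketch, not part of the original module): the termination
# functions above are referenced through ``TerminationTermCfg`` objects in an environment
# configuration. The sensor name "contact_forces", the body pattern "base" and the
# threshold values below are assumptions for illustration.
#
# from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
# from omni.isaac.orbit.utils import configclass
#
# @configclass
# class TerminationsCfg:
#     # episode ends when the base experiences more than 1 N of contact force
#     base_contact = DoneTerm(
#         func=illegal_contact,
#         params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
#     )
#     # episode ends when the robot tilts more than ~70 degrees from upright
#     base_orientation = DoneTerm(func=bad_orientation, params={"limit_angle": 1.22})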
| 6,594 | Python | 41.006369 | 118 | 0.7202 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/observations.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to create observation terms.
The functions can be passed to the :class:`omni.isaac.orbit.managers.ObservationTermCfg` object to enable
the observation introduced by the function.
"""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import Articulation, RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import RayCaster
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv, RLTaskEnv
"""
Root state.
"""
def base_pos_z(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Root height in the simulation world frame."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return asset.data.root_pos_w[:, 2].unsqueeze(-1)
def base_lin_vel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Root linear velocity in the asset's root frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_lin_vel_b
def base_ang_vel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Root angular velocity in the asset's root frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_ang_vel_b
def projected_gravity(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Gravity projection on the asset's root frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.projected_gravity_b
def root_pos_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Asset root position in the environment frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_pos_w - env.scene.env_origins
def root_quat_w(
env: BaseEnv, make_quat_unique: bool = False, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Asset root orientation (w, x, y, z) in the environment frame.
    If :attr:`make_quat_unique` is True, the returned quaternion is made unique by ensuring that
    its real component is non-negative. This is because both ``q`` and ``-q`` represent
    the same orientation.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
quat = asset.data.root_quat_w
# make the quaternion real-part positive if configured
return math_utils.quat_unique(quat) if make_quat_unique else quat
def root_lin_vel_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Asset root linear velocity in the environment frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_lin_vel_w
def root_ang_vel_w(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Asset root angular velocity in the environment frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_ang_vel_w
"""
Joint state.
"""
def joint_pos(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""The joint positions of the asset.
Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their positions returned.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return asset.data.joint_pos[:, asset_cfg.joint_ids]
def joint_pos_rel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""The joint positions of the asset w.r.t. the default joint positions.
Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their positions returned.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids]
def joint_pos_limit_normalized(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""The joint positions of the asset normalized with the asset's joint limits.
Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their normalized positions returned.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return math_utils.scale_transform(
asset.data.joint_pos[:, asset_cfg.joint_ids],
asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0],
asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1],
)
def joint_vel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")):
"""The joint velocities of the asset.
Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their velocities returned.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return asset.data.joint_vel[:, asset_cfg.joint_ids]
def joint_vel_rel(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")):
"""The joint velocities of the asset w.r.t. the default joint velocities.
Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their velocities returned.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
return asset.data.joint_vel[:, asset_cfg.joint_ids] - asset.data.default_joint_vel[:, asset_cfg.joint_ids]
"""
Sensors.
"""
def height_scan(env: BaseEnv, sensor_cfg: SceneEntityCfg, offset: float = 0.5) -> torch.Tensor:
"""Height scan from the given sensor w.r.t. the sensor's frame.
    The provided offset (defaults to 0.5) is subtracted from the returned height values.
"""
# extract the used quantities (to enable type-hinting)
sensor: RayCaster = env.scene.sensors[sensor_cfg.name]
# height scan: height = sensor_height - hit_point_z - offset
return sensor.data.pos_w[:, 2].unsqueeze(1) - sensor.data.ray_hits_w[..., 2] - offset
def body_incoming_wrench(env: BaseEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Incoming spatial wrench on bodies of an articulation in the simulation world frame.
This is the 6-D wrench (force and torque) applied to the body link by the incoming joint force.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# obtain the link incoming forces in world frame
link_incoming_forces = asset.root_physx_view.get_link_incoming_joint_force()[:, asset_cfg.body_ids]
return link_incoming_forces.view(env.num_envs, -1)
"""
Actions.
"""
def last_action(env: BaseEnv, action_name: str | None = None) -> torch.Tensor:
"""The last input action to the environment.
The name of the action term for which the action is required. If None, the
entire action tensor is returned.
"""
if action_name is None:
return env.action_manager.action
else:
return env.action_manager.get_term(action_name).raw_actions
"""
Commands.
"""
def generated_commands(env: RLTaskEnv, command_name: str) -> torch.Tensor:
"""The generated command from command term in the command manager with the given name."""
return env.command_manager.get_command(command_name)
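# Example wiring (illustrative sketch, not part of the original module): the observation
# functions above are referenced through ``ObservationTermCfg`` objects grouped into an
# ``ObservationGroupCfg``. The group layout below is an assumption for illustration.
#
# from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
# from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
# from omni.isaac.orbit.utils import configclass
#
# @configclass
# class PolicyCfg(ObsGroup):
#     base_ang_vel_term = ObsTerm(func=base_ang_vel)
#     joint_pos_term = ObsTerm(func=joint_pos_rel)
#     last_action_term = ObsTerm(func=last_action)
#
#     def __post_init__(self):
#         # concatenate all terms into a single policy observation vector
#         self.concatenate_terms = True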
| 8,019 | Python | 37.557692 | 114 | 0.712433 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/task_space_actions.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import carb
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.controllers.differential_ik import DifferentialIKController
from omni.isaac.orbit.managers.action_manager import ActionTerm
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from . import actions_cfg
class DifferentialInverseKinematicsAction(ActionTerm):
r"""Inverse Kinematics action term.
This action term performs pre-processing of the raw actions using scaling transformation.
.. math::
\text{action} = \text{scaling} \times \text{input action}
\text{joint position} = J^{-} \times \text{action}
    where :math:`\text{scaling}` is the scaling applied to the input action, :math:`\text{input action}`
    is the input action from the user, :math:`J^{-}` is the pseudo-inverse of the Jacobian over the
    articulation's actuated joints, and :math:`\text{joint position}` is the desired joint position
    command for the articulation's joints.
"""
cfg: actions_cfg.DifferentialInverseKinematicsActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
_scale: torch.Tensor
"""The scaling factor applied to the input action. Shape is (1, action_dim)."""
def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# resolve the joints over which the action term is applied
self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
self._num_joints = len(self._joint_ids)
# parse the body index
body_ids, body_names = self._asset.find_bodies(self.cfg.body_name)
if len(body_ids) != 1:
raise ValueError(
f"Expected one match for the body name: {self.cfg.body_name}. Found {len(body_ids)}: {body_names}."
)
# save only the first body index
self._body_idx = body_ids[0]
self._body_name = body_names[0]
# check if articulation is fixed-base
# if fixed-base then the jacobian for the base is not computed
        # this means that the number of bodies in the returned Jacobians is one less than the articulation's number of bodies
if self._asset.is_fixed_base:
self._jacobi_body_idx = self._body_idx - 1
else:
self._jacobi_body_idx = self._body_idx
# log info for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
carb.log_info(
f"Resolved body name for the action term {self.__class__.__name__}: {self._body_name} [{self._body_idx}]"
)
# Avoid indexing across all joints for efficiency
if self._num_joints == self._asset.num_joints:
self._joint_ids = slice(None)
# create the differential IK controller
self._ik_controller = DifferentialIKController(
cfg=self.cfg.controller, num_envs=self.num_envs, device=self.device
)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
self._processed_actions = torch.zeros_like(self.raw_actions)
# save the scale as tensors
self._scale = torch.zeros((self.num_envs, self.action_dim), device=self.device)
self._scale[:] = torch.tensor(self.cfg.scale, device=self.device)
# convert the fixed offsets to torch tensors of batched shape
if self.cfg.body_offset is not None:
self._offset_pos = torch.tensor(self.cfg.body_offset.pos, device=self.device).repeat(self.num_envs, 1)
self._offset_rot = torch.tensor(self.cfg.body_offset.rot, device=self.device).repeat(self.num_envs, 1)
else:
self._offset_pos, self._offset_rot = None, None
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._ik_controller.action_dim
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
self._processed_actions[:] = self.raw_actions * self._scale
# obtain quantities from simulation
ee_pos_curr, ee_quat_curr = self._compute_frame_pose()
# set command into controller
self._ik_controller.set_command(self._processed_actions, ee_pos_curr, ee_quat_curr)
def apply_actions(self):
# obtain quantities from simulation
ee_pos_curr, ee_quat_curr = self._compute_frame_pose()
joint_pos = self._asset.data.joint_pos[:, self._joint_ids]
# compute the delta in joint-space
if ee_quat_curr.norm() != 0:
jacobian = self._compute_frame_jacobian()
joint_pos_des = self._ik_controller.compute(ee_pos_curr, ee_quat_curr, jacobian, joint_pos)
else:
joint_pos_des = joint_pos.clone()
# set the joint position command
self._asset.set_joint_position_target(joint_pos_des, self._joint_ids)
"""
Helper functions.
"""
def _compute_frame_pose(self) -> tuple[torch.Tensor, torch.Tensor]:
"""Computes the pose of the target frame in the root frame.
Returns:
A tuple of the body's position and orientation in the root frame.
"""
# obtain quantities from simulation
ee_pose_w = self._asset.data.body_state_w[:, self._body_idx, :7]
root_pose_w = self._asset.data.root_state_w[:, :7]
# compute the pose of the body in the root frame
ee_pose_b, ee_quat_b = math_utils.subtract_frame_transforms(
root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
)
# account for the offset
if self.cfg.body_offset is not None:
ee_pose_b, ee_quat_b = math_utils.combine_frame_transforms(
ee_pose_b, ee_quat_b, self._offset_pos, self._offset_rot
)
return ee_pose_b, ee_quat_b
def _compute_frame_jacobian(self):
"""Computes the geometric Jacobian of the target frame in the root frame.
This function accounts for the target frame offset and applies the necessary transformations to obtain
the right Jacobian from the parent body Jacobian.
"""
# read the parent jacobian
jacobian = self._asset.root_physx_view.get_jacobians()[:, self._jacobi_body_idx, :, self._joint_ids]
# account for the offset
if self.cfg.body_offset is not None:
# Modify the jacobian to account for the offset
# -- translational part
# v_link = v_ee + w_ee x r_link_ee = v_J_ee * q + w_J_ee * q x r_link_ee
# = (v_J_ee + w_J_ee x r_link_ee ) * q
# = (v_J_ee - r_link_ee_[x] @ w_J_ee) * q
jacobian[:, 0:3, :] += torch.bmm(-math_utils.skew_symmetric_matrix(self._offset_pos), jacobian[:, 3:, :])
# -- rotational part
# w_link = R_link_ee @ w_ee
jacobian[:, 3:, :] = torch.bmm(math_utils.matrix_from_quat(self._offset_rot), jacobian[:, 3:, :])
return jacobian
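# Example configuration (illustrative sketch, not part of the original module): a task-space
# IK action for a 7-DoF arm. The joint pattern, body name and controller settings below are
# assumptions for illustration.
#
# from omni.isaac.orbit.controllers import DifferentialIKControllerCfg
# from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
#
# arm_action = DifferentialInverseKinematicsActionCfg(
#     asset_name="robot",
#     joint_names=["panda_joint.*"],
#     body_name="panda_hand",
#     controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
#     scale=0.5,
# )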
| 7,767 | Python | 40.100529 | 117 | 0.627527 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/non_holonomic_actions.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import carb
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.managers.action_manager import ActionTerm
from omni.isaac.orbit.utils.math import euler_xyz_from_quat
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from . import actions_cfg
class NonHolonomicAction(ActionTerm):
r"""Non-holonomic action that maps a two dimensional action to the velocity of the robot in
the x, y and yaw directions.
    This action term helps model a skid-steer robot base. The action is a 2D vector which comprises the
    forward velocity :math:`v_{B,x}` and the turning rate :math:`\omega_{B,z}` in the base frame. Using the current
base orientation, the commands are transformed into dummy joint velocity targets as:
.. math::
\dot{q}_{0, des} &= v_{B,x} \cos(\theta) \\
\dot{q}_{1, des} &= v_{B,x} \sin(\theta) \\
\dot{q}_{2, des} &= \omega_{B,z}
where :math:`\theta` is the yaw of the 2-D base. Since the base is simulated as a dummy joint, the yaw is directly
the value of the revolute joint along z, i.e., :math:`q_2 = \theta`.
.. note::
The current implementation assumes that the base is simulated with three dummy joints (prismatic joints along x
and y, and revolute joint along z). This is because it is easier to consider the mobile base as a floating link
        controlled by three dummy joints, in comparison to simulating wheels, which is at times tricky because of
friction settings.
However, the action term can be extended to support other base configurations as well.
.. tip::
For velocity control of the base with dummy mechanism, we recommend setting high damping gains to the joints.
This ensures that the base remains unperturbed from external disturbances, such as an arm mounted on the base.
"""
cfg: actions_cfg.NonHolonomicActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
_scale: torch.Tensor
"""The scaling factor applied to the input action. Shape is (1, 2)."""
_offset: torch.Tensor
"""The offset applied to the input action. Shape is (1, 2)."""
def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# parse the joint information
# -- x joint
x_joint_id, x_joint_name = self._asset.find_joints(self.cfg.x_joint_name)
if len(x_joint_id) != 1:
raise ValueError(
f"Expected a single joint match for the x joint name: {self.cfg.x_joint_name}, got {len(x_joint_id)}"
)
# -- y joint
y_joint_id, y_joint_name = self._asset.find_joints(self.cfg.y_joint_name)
if len(y_joint_id) != 1:
raise ValueError(f"Found more than one joint match for the y joint name: {self.cfg.y_joint_name}")
# -- yaw joint
yaw_joint_id, yaw_joint_name = self._asset.find_joints(self.cfg.yaw_joint_name)
if len(yaw_joint_id) != 1:
raise ValueError(f"Found more than one joint match for the yaw joint name: {self.cfg.yaw_joint_name}")
# parse the body index
self._body_idx, self._body_name = self._asset.find_bodies(self.cfg.body_name)
if len(self._body_idx) != 1:
raise ValueError(f"Found more than one body match for the body name: {self.cfg.body_name}")
# process into a list of joint ids
self._joint_ids = [x_joint_id[0], y_joint_id[0], yaw_joint_id[0]]
self._joint_names = [x_joint_name[0], y_joint_name[0], yaw_joint_name[0]]
# log info for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
carb.log_info(
f"Resolved body name for the action term {self.__class__.__name__}: {self._body_name} [{self._body_idx}]"
)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
self._processed_actions = torch.zeros_like(self.raw_actions)
self._joint_vel_command = torch.zeros(self.num_envs, 3, device=self.device)
# save the scale and offset as tensors
self._scale = torch.tensor(self.cfg.scale, device=self.device).unsqueeze(0)
self._offset = torch.tensor(self.cfg.offset, device=self.device).unsqueeze(0)
"""
Properties.
"""
@property
def action_dim(self) -> int:
return 2
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions):
# store the raw actions
self._raw_actions[:] = actions
self._processed_actions = self.raw_actions * self._scale + self._offset
def apply_actions(self):
        # obtain current heading
        # note: find_bodies returns a list, so the quaternion has shape (num_envs, 1, 4); flatten it
        quat_w = self._asset.data.body_quat_w[:, self._body_idx].view(self.num_envs, 4)
        yaw_w = euler_xyz_from_quat(quat_w)[2]
# compute joint velocities targets
self._joint_vel_command[:, 0] = torch.cos(yaw_w) * self.processed_actions[:, 0] # x
self._joint_vel_command[:, 1] = torch.sin(yaw_w) * self.processed_actions[:, 0] # y
self._joint_vel_command[:, 2] = self.processed_actions[:, 1] # yaw
# set the joint velocity targets
self._asset.set_joint_velocity_target(self._joint_vel_command, joint_ids=self._joint_ids)
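# Example configuration (illustrative sketch, not part of the original module): the dummy
# joint and body names below are assumptions and must match the prismatic x/y and revolute z
# joints of the base mechanism in the asset.
#
# from omni.isaac.orbit.envs.mdp.actions.actions_cfg import NonHolonomicActionCfg
#
# base_action = NonHolonomicActionCfg(
#     asset_name="robot",
#     body_name="base_link",
#     x_joint_name="base_joint_x",
#     y_joint_name="base_joint_y",
#     yaw_joint_name="base_joint_yaw",
#     scale=(1.0, 0.5),  # forward velocity and turning-rate scales
# )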
| 5,929 | Python | 40.760563 | 119 | 0.644291 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/joint_actions.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import carb
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.managers.action_manager import ActionTerm
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from . import actions_cfg
class JointAction(ActionTerm):
r"""Base class for joint actions.
This action term performs pre-processing of the raw actions using affine transformations (scale and offset).
These transformations can be configured to be applied to a subset of the articulation's joints.
Mathematically, the action term is defined as:
.. math::
\text{action} = \text{offset} + \text{scaling} \times \text{input action}
where :math:`\text{action}` is the action that is sent to the articulation's actuated joints, :math:`\text{offset}`
is the offset applied to the input action, :math:`\text{scaling}` is the scaling applied to the input
action, and :math:`\text{input action}` is the input action from the user.
    Based on the above, this kind of action transformation ensures that the input and output actions are in the same
units and dimensions. The child classes of this action term can then map the output action to a specific
desired command of the articulation's joints (e.g. position, velocity, etc.).
"""
cfg: actions_cfg.JointActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
_scale: torch.Tensor | float
"""The scaling factor applied to the input action."""
_offset: torch.Tensor | float
"""The offset applied to the input action."""
def __init__(self, cfg: actions_cfg.JointActionCfg, env: BaseEnv) -> None:
# initialize the action term
super().__init__(cfg, env)
# resolve the joints over which the action term is applied
self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
self._num_joints = len(self._joint_ids)
# log the resolved joint names for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
# Avoid indexing across all joints for efficiency
if self._num_joints == self._asset.num_joints:
self._joint_ids = slice(None)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
self._processed_actions = torch.zeros_like(self.raw_actions)
# parse scale
if isinstance(cfg.scale, (float, int)):
self._scale = float(cfg.scale)
elif isinstance(cfg.scale, dict):
self._scale = torch.ones(self.num_envs, self.action_dim, device=self.device)
# resolve the dictionary config
index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.scale, self._joint_names)
self._scale[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. Supported types are float and dict.")
# parse offset
if isinstance(cfg.offset, (float, int)):
self._offset = float(cfg.offset)
elif isinstance(cfg.offset, dict):
self._offset = torch.zeros_like(self._raw_actions)
# resolve the dictionary config
index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.offset, self._joint_names)
self._offset[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.")
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._num_joints
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
# apply the affine transformations
self._processed_actions = self._raw_actions * self._scale + self._offset
class JointPositionAction(JointAction):
"""Joint action term that applies the processed actions to the articulation's joints as position commands."""
cfg: actions_cfg.JointPositionActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.JointPositionActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# use default joint positions as offset
if cfg.use_default_offset:
self._offset = self._asset.data.default_joint_pos[:, self._joint_ids].clone()
def apply_actions(self):
# set position targets
self._asset.set_joint_position_target(self.processed_actions, joint_ids=self._joint_ids)
class RelativeJointPositionAction(JointAction):
r"""Joint action term that applies the processed actions to the articulation's joints as relative position commands.
Unlike :class:`JointPositionAction`, this action term applies the processed actions as relative position commands.
This means that the processed actions are added to the current joint positions of the articulation's joints
before being sent as position commands.
This means that the action applied at every step is:
.. math::
\text{applied action} = \text{current joint positions} + \text{processed actions}
where :math:`\text{current joint positions}` are the current joint positions of the articulation's joints.
"""
cfg: actions_cfg.RelativeJointPositionActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.RelativeJointPositionActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# use zero offset for relative position
if cfg.use_zero_offset:
self._offset = 0.0
def apply_actions(self):
# add current joint positions to the processed actions
current_actions = self.processed_actions + self._asset.data.joint_pos[:, self._joint_ids]
# set position targets
self._asset.set_joint_position_target(current_actions, joint_ids=self._joint_ids)
class JointVelocityAction(JointAction):
"""Joint action term that applies the processed actions to the articulation's joints as velocity commands."""
cfg: actions_cfg.JointVelocityActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.JointVelocityActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# use default joint velocity as offset
if cfg.use_default_offset:
self._offset = self._asset.data.default_joint_vel[:, self._joint_ids].clone()
def apply_actions(self):
# set joint velocity targets
self._asset.set_joint_velocity_target(self.processed_actions, joint_ids=self._joint_ids)
class JointEffortAction(JointAction):
"""Joint action term that applies the processed actions to the articulation's joints as effort commands."""
cfg: actions_cfg.JointEffortActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.JointEffortActionCfg, env: BaseEnv):
super().__init__(cfg, env)
def apply_actions(self):
# set joint effort targets
self._asset.set_joint_effort_target(self.processed_actions, joint_ids=self._joint_ids)
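# Example configurations (illustrative sketch, not part of the original module): the joint
# patterns and per-joint scales below are assumptions for illustration.
#
# from omni.isaac.orbit.envs.mdp.actions.actions_cfg import (
#     JointPositionActionCfg,
#     JointVelocityActionCfg,
# )
#
# # one scale for all joints, offset by the default joint positions
# joint_pos_action = JointPositionActionCfg(
#     asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True
# )
# # per-joint scales resolved through regex keys
# joint_vel_action = JointVelocityActionCfg(
#     asset_name="robot", joint_names=[".*"], scale={".*_hip_.*": 5.0, ".*_knee": 10.0}
# )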
| 8,070 | Python | 38.75862 | 120 | 0.67658 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Various action terms that can be used in the environment."""
from .actions_cfg import *
from .binary_joint_actions import *
from .joint_actions import *
from .joint_actions_to_limits import *
from .non_holonomic_actions import *
from .task_space_actions import *
| 356 | Python | 26.461536 | 63 | 0.75 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/actions_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from omni.isaac.orbit.controllers import DifferentialIKControllerCfg
from omni.isaac.orbit.managers.action_manager import ActionTerm, ActionTermCfg
from omni.isaac.orbit.utils import configclass
from . import binary_joint_actions, joint_actions, joint_actions_to_limits, non_holonomic_actions, task_space_actions
##
# Joint actions.
##
@configclass
class JointActionCfg(ActionTermCfg):
"""Configuration for the base joint action term.
See :class:`JointAction` for more details.
"""
joint_names: list[str] = MISSING
"""List of joint names or regex expressions that the action will be mapped to."""
scale: float | dict[str, float] = 1.0
"""Scale factor for the action (float or dict of regex expressions). Defaults to 1.0."""
offset: float | dict[str, float] = 0.0
"""Offset factor for the action (float or dict of regex expressions). Defaults to 0.0."""
@configclass
class JointPositionActionCfg(JointActionCfg):
"""Configuration for the joint position action term.
See :class:`JointPositionAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions.JointPositionAction
use_default_offset: bool = True
"""Whether to use default joint positions configured in the articulation asset as offset.
Defaults to True.
If True, this flag results in overwriting the values of :attr:`offset` to the default joint positions
from the articulation asset.
"""
@configclass
class RelativeJointPositionActionCfg(JointActionCfg):
"""Configuration for the relative joint position action term.
See :class:`RelativeJointPositionAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions.RelativeJointPositionAction
use_zero_offset: bool = True
"""Whether to ignore the offset defined in articulation asset. Defaults to True.
If True, this flag results in overwriting the values of :attr:`offset` to zero.
"""
@configclass
class JointVelocityActionCfg(JointActionCfg):
"""Configuration for the joint velocity action term.
See :class:`JointVelocityAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions.JointVelocityAction
use_default_offset: bool = True
"""Whether to use default joint velocities configured in the articulation asset as offset.
Defaults to True.
This overrides the settings from :attr:`offset` if set to True.
"""
@configclass
class JointEffortActionCfg(JointActionCfg):
"""Configuration for the joint effort action term.
See :class:`JointEffortAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions.JointEffortAction
##
# Joint actions rescaled to limits.
##
@configclass
class JointPositionToLimitsActionCfg(ActionTermCfg):
"""Configuration for the bounded joint position action term.
See :class:`JointPositionWithinLimitsAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions_to_limits.JointPositionToLimitsAction
joint_names: list[str] = MISSING
"""List of joint names or regex expressions that the action will be mapped to."""
scale: float | dict[str, float] = 1.0
"""Scale factor for the action (float or dict of regex expressions). Defaults to 1.0."""
rescale_to_limits: bool = True
"""Whether to rescale the action to the joint limits. Defaults to True.
If True, the input actions are rescaled to the joint limits, i.e., the action value in
the range [-1, 1] corresponds to the joint lower and upper limits respectively.
Note:
This operation is performed after applying the scale factor.
"""
@configclass
class EMAJointPositionToLimitsActionCfg(JointPositionToLimitsActionCfg):
"""Configuration for the exponential moving average (EMA) joint position action term.
See :class:`EMAJointPositionToLimitsAction` for more details.
"""
class_type: type[ActionTerm] = joint_actions_to_limits.EMAJointPositionToLimitsAction
alpha: float | dict[str, float] = 1.0
"""The weight for the moving average (float or dict of regex expressions). Defaults to 1.0.
If set to 1.0, the processed action is applied directly without any moving average window.
"""
##
# Gripper actions.
##
@configclass
class BinaryJointActionCfg(ActionTermCfg):
"""Configuration for the base binary joint action term.
See :class:`BinaryJointAction` for more details.
"""
joint_names: list[str] = MISSING
"""List of joint names or regex expressions that the action will be mapped to."""
open_command_expr: dict[str, float] = MISSING
"""The joint command to move to *open* configuration."""
close_command_expr: dict[str, float] = MISSING
"""The joint command to move to *close* configuration."""
@configclass
class BinaryJointPositionActionCfg(BinaryJointActionCfg):
"""Configuration for the binary joint position action term.
See :class:`BinaryJointPositionAction` for more details.
"""
class_type: type[ActionTerm] = binary_joint_actions.BinaryJointPositionAction
@configclass
class BinaryJointVelocityActionCfg(BinaryJointActionCfg):
"""Configuration for the binary joint velocity action term.
See :class:`BinaryJointVelocityAction` for more details.
"""
class_type: type[ActionTerm] = binary_joint_actions.BinaryJointVelocityAction
##
# Non-holonomic actions.
##
@configclass
class NonHolonomicActionCfg(ActionTermCfg):
"""Configuration for the non-holonomic action term with dummy joints at the base.
See :class:`NonHolonomicAction` for more details.
"""
class_type: type[ActionTerm] = non_holonomic_actions.NonHolonomicAction
body_name: str = MISSING
"""Name of the body which has the dummy mechanism connected to."""
x_joint_name: str = MISSING
"""The dummy joint name in the x direction."""
y_joint_name: str = MISSING
"""The dummy joint name in the y direction."""
yaw_joint_name: str = MISSING
"""The dummy joint name in the yaw direction."""
scale: tuple[float, float] = (1.0, 1.0)
"""Scale factor for the action. Defaults to (1.0, 1.0)."""
offset: tuple[float, float] = (0.0, 0.0)
"""Offset factor for the action. Defaults to (0.0, 0.0)."""
##
# Task-space Actions.
##
@configclass
class DifferentialInverseKinematicsActionCfg(ActionTermCfg):
"""Configuration for inverse differential kinematics action term.
See :class:`DifferentialInverseKinematicsAction` for more details.
"""
@configclass
class OffsetCfg:
"""The offset pose from parent frame to child frame.
On many robots, end-effector frames are fictitious frames that do not have a corresponding
rigid body. In such cases, it is easier to define this transform w.r.t. their parent rigid body.
        For instance, for the Franka Emika arm, the end-effector is defined at an offset to the
"panda_hand" frame.
"""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation ``(w, x, y, z)`` w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
class_type: type[ActionTerm] = task_space_actions.DifferentialInverseKinematicsAction
joint_names: list[str] = MISSING
"""List of joint names or regex expressions that the action will be mapped to."""
body_name: str = MISSING
"""Name of the body or frame for which IK is performed."""
body_offset: OffsetCfg | None = None
"""Offset of target frame w.r.t. to the body frame. Defaults to None, in which case no offset is applied."""
scale: float | tuple[float, ...] = 1.0
"""Scale factor for the action. Defaults to 1.0."""
controller: DifferentialIKControllerCfg = MISSING
"""The configuration for the differential IK controller."""
| 8,113 | Python | 31.586345 | 117 | 0.709355 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/binary_joint_actions.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
import carb
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.managers.action_manager import ActionTerm
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from . import actions_cfg
class BinaryJointAction(ActionTerm):
"""Base class for binary joint actions.
This action term maps a binary action to the *open* or *close* joint configurations. These configurations are
specified through the :class:`BinaryJointActionCfg` object. If the input action is a float vector, the action
is considered binary based on the sign of the action values.
    Based on the above, we follow this convention for the binary action:
1. Open action: 1 (bool) or positive values (float).
2. Close action: 0 (bool) or negative values (float).
The action term can mostly be used for gripper actions, where the gripper is either open or closed. This
helps in devising a mimicking mechanism for the gripper, since in simulation it is often not possible to
add such constraints to the gripper.
"""
cfg: actions_cfg.BinaryJointActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: BaseEnv) -> None:
# initialize the action term
super().__init__(cfg, env)
# resolve the joints over which the action term is applied
self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
self._num_joints = len(self._joint_ids)
# log the resolved joint names for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, 1, device=self.device)
self._processed_actions = torch.zeros(self.num_envs, self._num_joints, device=self.device)
# parse open command
self._open_command = torch.zeros(self._num_joints, device=self.device)
index_list, name_list, value_list = string_utils.resolve_matching_names_values(
self.cfg.open_command_expr, self._joint_names
)
if len(index_list) != self._num_joints:
raise ValueError(
f"Could not resolve all joints for the action term. Missing: {set(self._joint_names) - set(name_list)}"
)
self._open_command[index_list] = torch.tensor(value_list, device=self.device)
# parse close command
self._close_command = torch.zeros_like(self._open_command)
index_list, name_list, value_list = string_utils.resolve_matching_names_values(
self.cfg.close_command_expr, self._joint_names
)
if len(index_list) != self._num_joints:
raise ValueError(
f"Could not resolve all joints for the action term. Missing: {set(self._joint_names) - set(name_list)}"
)
self._close_command[index_list] = torch.tensor(value_list, device=self.device)
"""
Properties.
"""
@property
def action_dim(self) -> int:
return 1
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
# compute the binary mask
if actions.dtype == torch.bool:
# true: close, false: open
binary_mask = actions == 0
else:
# true: close, false: open
binary_mask = actions < 0
# compute the command
self._processed_actions = torch.where(binary_mask, self._close_command, self._open_command)
class BinaryJointPositionAction(BinaryJointAction):
"""Binary joint action that sets the binary action into joint position targets."""
cfg: actions_cfg.BinaryJointPositionActionCfg
"""The configuration of the action term."""
def apply_actions(self):
self._asset.set_joint_position_target(self._processed_actions, joint_ids=self._joint_ids)
class BinaryJointVelocityAction(BinaryJointAction):
"""Binary joint action that sets the binary action into joint velocity targets."""
cfg: actions_cfg.BinaryJointVelocityActionCfg
"""The configuration of the action term."""
def apply_actions(self):
self._asset.set_joint_velocity_target(self._processed_actions, joint_ids=self._joint_ids)
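# Example configuration (illustrative sketch, not part of the original module): a two-finger
# gripper driven by a single binary action. The joint pattern and command values below are
# assumptions for illustration.
#
# from omni.isaac.orbit.envs.mdp.actions.actions_cfg import BinaryJointPositionActionCfg
#
# gripper_action = BinaryJointPositionActionCfg(
#     asset_name="robot",
#     joint_names=["panda_finger.*"],
#     open_command_expr={"panda_finger_.*": 0.04},  # fingers apart
#     close_command_expr={"panda_finger_.*": 0.0},  # fingers together
# )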
| 5,021 | Python | 35.656934 | 119 | 0.667994 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/actions/joint_actions_to_limits.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import carb
import omni.isaac.orbit.utils.math as math_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.managers.action_manager import ActionTerm
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from . import actions_cfg
class JointPositionToLimitsAction(ActionTerm):
"""Joint position action term that scales the input actions to the joint limits and applies them to the
articulation's joints.
This class is similar to the :class:`JointPositionAction` class. However, it performs additional
re-scaling of input actions to the actuator joint position limits.
While processing the actions, it performs the following operations:
1. Apply scaling to the raw actions based on :attr:`actions_cfg.JointPositionToLimitsActionCfg.scale`.
2. Clip the scaled actions to the range [-1, 1] and re-scale them to the joint limits if
:attr:`actions_cfg.JointPositionToLimitsActionCfg.rescale_to_limits` is set to True.
The processed actions are then sent as position commands to the articulation's joints.
"""
cfg: actions_cfg.JointPositionToLimitsActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
_scale: torch.Tensor | float
"""The scaling factor applied to the input action."""
def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# resolve the joints over which the action term is applied
self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
self._num_joints = len(self._joint_ids)
# log the resolved joint names for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
# Avoid indexing across all joints for efficiency
if self._num_joints == self._asset.num_joints:
self._joint_ids = slice(None)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
self._processed_actions = torch.zeros_like(self.raw_actions)
# parse scale
if isinstance(cfg.scale, (float, int)):
self._scale = float(cfg.scale)
elif isinstance(cfg.scale, dict):
self._scale = torch.ones(self.num_envs, self.action_dim, device=self.device)
# resolve the dictionary config
index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.scale, self._joint_names)
self._scale[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. Supported types are float and dict.")
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._num_joints
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
# apply affine transformations
self._processed_actions = self._raw_actions * self._scale
# rescale the position targets if configured
# this is useful when the input actions are in the range [-1, 1]
if self.cfg.rescale_to_limits:
# clip to [-1, 1]
actions = self._processed_actions.clamp(-1.0, 1.0)
# rescale within the joint limits
actions = math_utils.unscale_transform(
actions,
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 0],
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 1],
)
self._processed_actions[:] = actions[:]
def apply_actions(self):
# set position targets
self._asset.set_joint_position_target(self.processed_actions, joint_ids=self._joint_ids)
class EMAJointPositionToLimitsAction(JointPositionToLimitsAction):
r"""Joint action term that applies exponential moving average (EMA) over the processed actions as the
articulation's joints position commands.
Exponential moving average (EMA) is a type of moving average that gives more weight to the most recent data points.
This action term applies the processed actions as moving average position action commands.
The moving average is computed as:
.. math::
\text{applied action} = \alpha \times \text{processed actions} + (1 - \alpha) \times \text{previous applied action}
    where :math:`\alpha` is the weight for the moving average, :math:`\text{processed actions}` are the
    processed actions at the current step, and :math:`\text{previous applied action}` is the action that was
    applied to the articulation's joints at the previous step.
In the trivial case where the weight is 1.0, the action term behaves exactly like
the :class:`JointPositionToLimitsAction` class.
On reset, the previous action is initialized to the current joint positions of the articulation's joints.
"""
cfg: actions_cfg.EMAJointPositionToLimitsActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.EMAJointPositionToLimitsActionCfg, env: BaseEnv):
# initialize the action term
super().__init__(cfg, env)
# parse and save the moving average weight
if isinstance(cfg.alpha, float):
# check that the weight is in the valid range
if not 0.0 <= cfg.alpha <= 1.0:
raise ValueError(f"Moving average weight must be in the range [0, 1]. Got {cfg.alpha}.")
self._alpha = cfg.alpha
elif isinstance(cfg.alpha, dict):
self._alpha = torch.ones((env.num_envs, self.action_dim), device=self.device)
# resolve the dictionary config
index_list, names_list, value_list = string_utils.resolve_matching_names_values(
cfg.alpha, self._joint_names
)
# check that the weights are in the valid range
for name, value in zip(names_list, value_list):
if not 0.0 <= value <= 1.0:
raise ValueError(
f"Moving average weight must be in the range [0, 1]. Got {value} for joint {name}."
)
self._alpha[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(
f"Unsupported moving average weight type: {type(cfg.alpha)}. Supported types are float and dict."
)
# initialize the previous targets
self._prev_applied_actions = torch.zeros_like(self.processed_actions)
def reset(self, env_ids: Sequence[int] | None = None) -> None:
# check if specific environment ids are provided
if env_ids is None:
env_ids = slice(None)
# reset history to current joint positions
        self._prev_applied_actions[env_ids] = self._asset.data.joint_pos[env_ids][:, self._joint_ids]
def process_actions(self, actions: torch.Tensor):
# apply affine transformations
super().process_actions(actions)
# set position targets as moving average
ema_actions = self._alpha * self._processed_actions
ema_actions += (1.0 - self._alpha) * self._prev_applied_actions
# clamp the targets
self._processed_actions[:] = torch.clamp(
ema_actions,
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 0],
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 1],
)
# update previous targets
self._prev_applied_actions[:] = self._processed_actions[:]
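# Worked example of the EMA update (illustrative, not part of the original module): with
# alpha = 0.2, a constant processed action of 1.0 and an initial previous applied action of
# 0.0, the applied targets approach 1.0 geometrically:
#
#   step 1: 0.2 * 1.0 + 0.8 * 0.00 = 0.20
#   step 2: 0.2 * 1.0 + 0.8 * 0.20 = 0.36
#   step 3: 0.2 * 1.0 + 0.8 * 0.36 = 0.488
#
# i.e. the position command is low-pass filtered before being clamped to the soft joint limits.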
| 8,380 | Python | 40.905 | 123 | 0.652625 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/commands_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from dataclasses import MISSING
from omni.isaac.orbit.managers import CommandTermCfg
from omni.isaac.orbit.utils import configclass
from .null_command import NullCommand
from .pose_2d_command import TerrainBasedPose2dCommand, UniformPose2dCommand
from .pose_command import UniformPoseCommand
from .velocity_command import NormalVelocityCommand, UniformVelocityCommand
@configclass
class NullCommandCfg(CommandTermCfg):
"""Configuration for the null command generator."""
class_type: type = NullCommand
def __post_init__(self):
"""Post initialization."""
# set the resampling time range to infinity to avoid resampling
self.resampling_time_range = (math.inf, math.inf)
@configclass
class UniformVelocityCommandCfg(CommandTermCfg):
"""Configuration for the uniform velocity command generator."""
class_type: type = UniformVelocityCommand
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
heading_command: bool = MISSING
"""Whether to use heading command or angular velocity command.
If True, the angular velocity command is computed from the heading error, where the
target heading is sampled uniformly from provided range. Otherwise, the angular velocity
command is sampled uniformly from provided range.
"""
heading_control_stiffness: float = MISSING
"""Scale factor to convert the heading error to angular velocity command."""
    rel_standing_envs: float = MISSING
    """Probability threshold for environments where the robots are standing still."""
rel_heading_envs: float = MISSING
"""Probability threshold for environments where the robots follow the heading-based angular velocity command
(the others follow the sampled angular velocity command)."""
@configclass
class Ranges:
"""Uniform distribution ranges for the velocity commands."""
lin_vel_x: tuple[float, float] = MISSING # min max [m/s]
lin_vel_y: tuple[float, float] = MISSING # min max [m/s]
ang_vel_z: tuple[float, float] = MISSING # min max [rad/s]
heading: tuple[float, float] = MISSING # min max [rad]
ranges: Ranges = MISSING
"""Distribution ranges for the velocity commands."""
@configclass
class NormalVelocityCommandCfg(UniformVelocityCommandCfg):
"""Configuration for the normal velocity command generator."""
class_type: type = NormalVelocityCommand
heading_command: bool = False # --> we don't use heading command for normal velocity command.
@configclass
class Ranges:
"""Normal distribution ranges for the velocity commands."""
mean_vel: tuple[float, float, float] = MISSING
"""Mean velocity for the normal distribution.
The tuple contains the mean linear-x, linear-y, and angular-z velocity.
"""
std_vel: tuple[float, float, float] = MISSING
"""Standard deviation for the normal distribution.
The tuple contains the standard deviation linear-x, linear-y, and angular-z velocity.
"""
zero_prob: tuple[float, float, float] = MISSING
"""Probability of zero velocity for the normal distribution.
The tuple contains the probability of zero linear-x, linear-y, and angular-z velocity.
"""
ranges: Ranges = MISSING
"""Distribution ranges for the velocity commands."""
@configclass
class UniformPoseCommandCfg(CommandTermCfg):
"""Configuration for uniform pose command generator."""
class_type: type = UniformPoseCommand
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
body_name: str = MISSING
"""Name of the body in the asset for which the commands are generated."""
make_quat_unique: bool = False
"""Whether to make the quaternion unique or not. Defaults to False.
If True, the quaternion is made unique by ensuring the real part is positive.
"""
@configclass
class Ranges:
"""Uniform distribution ranges for the pose commands."""
pos_x: tuple[float, float] = MISSING # min max [m]
pos_y: tuple[float, float] = MISSING # min max [m]
pos_z: tuple[float, float] = MISSING # min max [m]
roll: tuple[float, float] = MISSING # min max [rad]
pitch: tuple[float, float] = MISSING # min max [rad]
yaw: tuple[float, float] = MISSING # min max [rad]
ranges: Ranges = MISSING
"""Ranges for the commands."""
@configclass
class UniformPose2dCommandCfg(CommandTermCfg):
"""Configuration for the uniform 2D-pose command generator."""
class_type: type = UniformPose2dCommand
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
simple_heading: bool = MISSING
"""Whether to use simple heading or not.
If True, the heading is in the direction of the target position.
"""
@configclass
class Ranges:
"""Uniform distribution ranges for the position commands."""
pos_x: tuple[float, float] = MISSING
"""Range for the x position (in m)."""
pos_y: tuple[float, float] = MISSING
"""Range for the y position (in m)."""
heading: tuple[float, float] = MISSING
"""Heading range for the position commands (in rad).
Used only if :attr:`simple_heading` is False.
"""
ranges: Ranges = MISSING
"""Distribution ranges for the position commands."""
@configclass
class TerrainBasedPose2dCommandCfg(UniformPose2dCommandCfg):
"""Configuration for the terrain-based position command generator."""
class_type = TerrainBasedPose2dCommand
@configclass
class Ranges:
"""Uniform distribution ranges for the position commands."""
heading: tuple[float, float] = MISSING
"""Heading range for the position commands (in rad).
Used only if :attr:`simple_heading` is False.
"""
ranges: Ranges = MISSING
"""Distribution ranges for the sampled commands."""
| 6,228 | Python | 33.605555 | 112 | 0.688182 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/pose_2d_command.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for the 2D-pose for locomotion tasks."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import CommandTerm
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import GREEN_ARROW_X_MARKER_CFG
from omni.isaac.orbit.terrains import TerrainImporter
from omni.isaac.orbit.utils.math import quat_from_euler_xyz, quat_rotate_inverse, wrap_to_pi, yaw_quat
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from .commands_cfg import TerrainBasedPose2dCommandCfg, UniformPose2dCommandCfg
class UniformPose2dCommand(CommandTerm):
"""Command generator that generates pose commands containing a 3-D position and heading.
The command generator samples uniform 2D positions around the environment origin. It sets
the height of the position command to the default root height of the robot. The heading
command is either set to point towards the target or is sampled uniformly.
    This can be configured through the :attr:`UniformPose2dCommandCfg.simple_heading` parameter in
    the configuration.
"""
cfg: UniformPose2dCommandCfg
"""Configuration for the command generator."""
def __init__(self, cfg: UniformPose2dCommandCfg, env: BaseEnv):
"""Initialize the command generator class.
Args:
cfg: The configuration parameters for the command generator.
env: The environment object.
"""
# initialize the base class
super().__init__(cfg, env)
# obtain the robot and terrain assets
# -- robot
self.robot: Articulation = env.scene[cfg.asset_name]
        # create buffers to store the command
# -- commands: (x, y, z, heading)
self.pos_command_w = torch.zeros(self.num_envs, 3, device=self.device)
self.heading_command_w = torch.zeros(self.num_envs, device=self.device)
self.pos_command_b = torch.zeros_like(self.pos_command_w)
self.heading_command_b = torch.zeros_like(self.heading_command_w)
        # -- metrics
        self.metrics["error_pos_2d"] = torch.zeros(self.num_envs, device=self.device)
self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device)
def __str__(self) -> str:
msg = "PositionCommand:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
msg += f"\tResampling time range: {self.cfg.resampling_time_range}"
return msg
"""
Properties
"""
@property
def command(self) -> torch.Tensor:
"""The desired 2D-pose in base frame. Shape is (num_envs, 4)."""
return torch.cat([self.pos_command_b, self.heading_command_b.unsqueeze(1)], dim=1)
"""
Implementation specific functions.
"""
def _update_metrics(self):
# logs data
self.metrics["error_pos_2d"] = torch.norm(self.pos_command_w[:, :2] - self.robot.data.root_pos_w[:, :2], dim=1)
self.metrics["error_heading"] = torch.abs(wrap_to_pi(self.heading_command_w - self.robot.data.heading_w))
def _resample_command(self, env_ids: Sequence[int]):
# obtain env origins for the environments
self.pos_command_w[env_ids] = self._env.scene.env_origins[env_ids]
        # offset the position command by a sampled offset within the configured ranges
r = torch.empty(len(env_ids), device=self.device)
self.pos_command_w[env_ids, 0] += r.uniform_(*self.cfg.ranges.pos_x)
self.pos_command_w[env_ids, 1] += r.uniform_(*self.cfg.ranges.pos_y)
self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2]
if self.cfg.simple_heading:
# set heading command to point towards target
target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids]
target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0])
flipped_target_direction = wrap_to_pi(target_direction + torch.pi)
# compute errors to find the closest direction to the current heading
# this is done to avoid the discontinuity at the -pi/pi boundary
curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs()
curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs()
# set the heading command to the closest direction
self.heading_command_w[env_ids] = torch.where(
curr_to_target < curr_to_flipped_target,
target_direction,
flipped_target_direction,
)
else:
# random heading command
self.heading_command_w[env_ids] = r.uniform_(*self.cfg.ranges.heading)
def _update_command(self):
"""Re-target the position command to the current root state."""
target_vec = self.pos_command_w - self.robot.data.root_pos_w[:, :3]
self.pos_command_b[:] = quat_rotate_inverse(yaw_quat(self.robot.data.root_quat_w), target_vec)
self.heading_command_b[:] = wrap_to_pi(self.heading_command_w - self.robot.data.heading_w)
def _set_debug_vis_impl(self, debug_vis: bool):
        # create markers if necessary for the first time
if debug_vis:
if not hasattr(self, "arrow_goal_visualizer"):
marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy()
marker_cfg.markers["arrow"].scale = (0.2, 0.2, 0.8)
marker_cfg.prim_path = "/Visuals/Command/pose_goal"
self.arrow_goal_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.arrow_goal_visualizer.set_visibility(True)
else:
if hasattr(self, "arrow_goal_visualizer"):
self.arrow_goal_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
        # update the arrow marker
self.arrow_goal_visualizer.visualize(
translations=self.pos_command_w,
orientations=quat_from_euler_xyz(
torch.zeros_like(self.heading_command_w),
torch.zeros_like(self.heading_command_w),
self.heading_command_w,
),
)
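# Illustrative sketch (not from the original source): the `simple_heading` branch above
# picks whichever of the two directions (towards the target, or away from it) is closer
# to the robot's current heading, so the robot may also walk backwards to the goal.
# A standalone reproduction with plain tensors, assuming only `wrap_to_pi` as imported above:
#
#   import torch
#   from omni.isaac.orbit.utils.math import wrap_to_pi
#
#   heading = torch.tensor([0.1, 3.0])           # current headings (rad)
#   target_dir = torch.tensor([3.0, 0.1])        # directions to the targets (rad)
#   flipped = wrap_to_pi(target_dir + torch.pi)  # directions away from the targets
#   err_fwd = wrap_to_pi(target_dir - heading).abs()
#   err_bwd = wrap_to_pi(flipped - heading).abs()
#   command = torch.where(err_fwd < err_bwd, target_dir, flipped)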
class TerrainBasedPose2dCommand(UniformPose2dCommand):
"""Command generator that generates pose commands based on the terrain.
This command generator samples the position commands from the valid patches of the terrain.
The heading commands are either set to point towards the target or are sampled uniformly.
    It expects the terrain to have valid flat patches under the key 'target'.
"""
cfg: TerrainBasedPose2dCommandCfg
"""Configuration for the command generator."""
def __init__(self, cfg: TerrainBasedPose2dCommandCfg, env: BaseEnv):
# initialize the base class
super().__init__(cfg, env)
# obtain the terrain asset
self.terrain: TerrainImporter = env.scene["terrain"]
# obtain the valid targets from the terrain
if "target" not in self.terrain.flat_patches:
raise RuntimeError(
"The terrain-based command generator requires a valid flat patch under 'target' in the terrain."
f" Found: {list(self.terrain.flat_patches.keys())}"
)
# valid targets: (terrain_level, terrain_type, num_patches, 3)
self.valid_targets: torch.Tensor = self.terrain.flat_patches["target"]
def _resample_command(self, env_ids: Sequence[int]):
# sample new position targets from the terrain
ids = torch.randint(0, self.valid_targets.shape[2], size=(len(env_ids),), device=self.device)
self.pos_command_w[env_ids] = self.valid_targets[
self.terrain.terrain_levels[env_ids], self.terrain.terrain_types[env_ids], ids
]
        # offset the position command by the default root height of the robot
self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2]
if self.cfg.simple_heading:
# set heading command to point towards target
target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids]
target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0])
flipped_target_direction = wrap_to_pi(target_direction + torch.pi)
# compute errors to find the closest direction to the current heading
# this is done to avoid the discontinuity at the -pi/pi boundary
curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs()
curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs()
# set the heading command to the closest direction
self.heading_command_w[env_ids] = torch.where(
curr_to_target < curr_to_flipped_target,
target_direction,
flipped_target_direction,
)
else:
# random heading command
r = torch.empty(len(env_ids), device=self.device)
self.heading_command_w[env_ids] = r.uniform_(*self.cfg.ranges.heading)
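# Illustrative sketch (not from the original source): the patch lookup above uses
# PyTorch advanced indexing with three equal-length index tensors, one entry per
# environment. A minimal standalone example with made-up shapes:
#
#   import torch
#
#   valid_targets = torch.rand(5, 4, 100, 3)     # (levels, types, patches, xyz)
#   levels = torch.tensor([0, 2, 4])             # per-env terrain level
#   types = torch.tensor([1, 3, 0])              # per-env terrain type
#   ids = torch.randint(0, 100, (3,))            # per-env random patch index
#   targets = valid_targets[levels, types, ids]  # -> shape (3, 3), one xyz per env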
| 9,435 | Python | 44.365384 | 119 | 0.649921 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/velocity_command.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for the velocity-based locomotion task."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import CommandTerm
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import BLUE_ARROW_X_MARKER_CFG, GREEN_ARROW_X_MARKER_CFG
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from .commands_cfg import NormalVelocityCommandCfg, UniformVelocityCommandCfg
class UniformVelocityCommand(CommandTerm):
r"""Command generator that generates a velocity command in SE(2) from uniform distribution.
    The command comprises a linear velocity in the x and y directions and an angular velocity around
the z-axis. It is given in the robot's base frame.
If the :attr:`cfg.heading_command` flag is set to True, the angular velocity is computed from the heading
error similar to doing a proportional control on the heading error. The target heading is sampled uniformly
from the provided range. Otherwise, the angular velocity is sampled uniformly from the provided range.
    Mathematically, the angular velocity is computed as follows from the heading command:
    .. math::
        \omega_z = k_{\text{heading}} \cdot \text{wrap_to_pi}(\theta_{\text{target}} - \theta_{\text{current}})
    where :math:`k_{\text{heading}}` is the gain :attr:`UniformVelocityCommandCfg.heading_control_stiffness`,
    and the result is clipped to the configured angular velocity range.
"""
cfg: UniformVelocityCommandCfg
"""The configuration of the command generator."""
def __init__(self, cfg: UniformVelocityCommandCfg, env: BaseEnv):
"""Initialize the command generator.
Args:
cfg: The configuration of the command generator.
env: The environment.
"""
# initialize the base class
super().__init__(cfg, env)
# obtain the robot asset
# -- robot
self.robot: Articulation = env.scene[cfg.asset_name]
        # create buffers to store the command
# -- command: x vel, y vel, yaw vel, heading
self.vel_command_b = torch.zeros(self.num_envs, 3, device=self.device)
self.heading_target = torch.zeros(self.num_envs, device=self.device)
self.is_heading_env = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
self.is_standing_env = torch.zeros_like(self.is_heading_env)
# -- metrics
self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device)
self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device)
def __str__(self) -> str:
"""Return a string representation of the command generator."""
msg = "UniformVelocityCommand:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n"
msg += f"\tHeading command: {self.cfg.heading_command}\n"
if self.cfg.heading_command:
msg += f"\tHeading probability: {self.cfg.rel_heading_envs}\n"
msg += f"\tStanding probability: {self.cfg.rel_standing_envs}"
return msg
"""
Properties
"""
@property
def command(self) -> torch.Tensor:
"""The desired base velocity command in the base frame. Shape is (num_envs, 3)."""
return self.vel_command_b
"""
Implementation specific functions.
"""
def _update_metrics(self):
# time for which the command was executed
max_command_time = self.cfg.resampling_time_range[1]
max_command_step = max_command_time / self._env.step_dt
# logs data
self.metrics["error_vel_xy"] += (
torch.norm(self.vel_command_b[:, :2] - self.robot.data.root_lin_vel_b[:, :2], dim=-1) / max_command_step
)
self.metrics["error_vel_yaw"] += (
torch.abs(self.vel_command_b[:, 2] - self.robot.data.root_ang_vel_b[:, 2]) / max_command_step
)
def _resample_command(self, env_ids: Sequence[int]):
# sample velocity commands
r = torch.empty(len(env_ids), device=self.device)
# -- linear velocity - x direction
self.vel_command_b[env_ids, 0] = r.uniform_(*self.cfg.ranges.lin_vel_x)
# -- linear velocity - y direction
self.vel_command_b[env_ids, 1] = r.uniform_(*self.cfg.ranges.lin_vel_y)
# -- ang vel yaw - rotation around z
self.vel_command_b[env_ids, 2] = r.uniform_(*self.cfg.ranges.ang_vel_z)
# heading target
if self.cfg.heading_command:
self.heading_target[env_ids] = r.uniform_(*self.cfg.ranges.heading)
# update heading envs
self.is_heading_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_heading_envs
# update standing envs
self.is_standing_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_standing_envs
def _update_command(self):
"""Post-processes the velocity command.
This function sets velocity command to zero for standing environments and computes angular
velocity from heading direction if the heading_command flag is set.
"""
# Compute angular velocity from heading direction
if self.cfg.heading_command:
# resolve indices of heading envs
env_ids = self.is_heading_env.nonzero(as_tuple=False).flatten()
# compute angular velocity
heading_error = math_utils.wrap_to_pi(self.heading_target[env_ids] - self.robot.data.heading_w[env_ids])
self.vel_command_b[env_ids, 2] = torch.clip(
self.cfg.heading_control_stiffness * heading_error,
min=self.cfg.ranges.ang_vel_z[0],
max=self.cfg.ranges.ang_vel_z[1],
)
# Enforce standing (i.e., zero velocity command) for standing envs
# TODO: check if conversion is needed
standing_env_ids = self.is_standing_env.nonzero(as_tuple=False).flatten()
self.vel_command_b[standing_env_ids, :] = 0.0
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
        # note: the parent class only deals with callbacks, not their visibility
if debug_vis:
            # create markers if necessary for the first time
if not hasattr(self, "base_vel_goal_visualizer"):
# -- goal
marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Command/velocity_goal"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_goal_visualizer = VisualizationMarkers(marker_cfg)
# -- current
marker_cfg = BLUE_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Command/velocity_current"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.base_vel_goal_visualizer.set_visibility(True)
self.base_vel_visualizer.set_visibility(True)
else:
if hasattr(self, "base_vel_goal_visualizer"):
self.base_vel_goal_visualizer.set_visibility(False)
self.base_vel_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# get marker location
# -- base state
base_pos_w = self.robot.data.root_pos_w.clone()
base_pos_w[:, 2] += 0.5
# -- resolve the scales and quaternions
vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.command[:, :2])
vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2])
# display markers
self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale)
self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale)
"""
Internal helpers.
"""
def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""Converts the XY base velocity command to arrow direction rotation."""
# obtain default scale of the marker
default_scale = self.base_vel_goal_visualizer.cfg.markers["arrow"].scale
# arrow-scale
arrow_scale = torch.tensor(default_scale, device=self.device).repeat(xy_velocity.shape[0], 1)
arrow_scale[:, 0] *= torch.linalg.norm(xy_velocity, dim=1) * 3.0
# arrow-direction
heading_angle = torch.atan2(xy_velocity[:, 1], xy_velocity[:, 0])
zeros = torch.zeros_like(heading_angle)
arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle)
# convert everything back from base to world frame
base_quat_w = self.robot.data.root_quat_w
arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat)
return arrow_scale, arrow_quat
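# Illustrative sketch (not from the original source): the heading-based yaw command in
# `_update_command` is a clipped proportional controller on the wrapped heading error,
# matching the formula in the class docstring. Standalone version; the gain and the
# yaw-rate range below are assumed example values:
#
#   import torch
#   from omni.isaac.orbit.utils.math import wrap_to_pi
#
#   k = 0.5                  # heading_control_stiffness (example value)
#   ang_vel_z = (-1.0, 1.0)  # allowed yaw-rate range (example value)
#   err = wrap_to_pi(torch.tensor([2.0]) - torch.tensor([-2.5]))  # target - current
#   omega_z = torch.clip(k * err, min=ang_vel_z[0], max=ang_vel_z[1])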
class NormalVelocityCommand(UniformVelocityCommand):
"""Command generator that generates a velocity command in SE(2) from a normal distribution.
    The command comprises a linear velocity in the x and y directions and an angular velocity around
the z-axis. It is given in the robot's base frame.
The command is sampled from a normal distribution with mean and standard deviation specified in
the configuration. With equal probability, the sign of the individual components is flipped.
"""
cfg: NormalVelocityCommandCfg
"""The command generator configuration."""
    def __init__(self, cfg: NormalVelocityCommandCfg, env: BaseEnv):
"""Initializes the command generator.
Args:
cfg: The command generator configuration.
env: The environment.
"""
        super().__init__(cfg, env)
# create buffers for zero commands envs
self.is_zero_vel_x_env = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
self.is_zero_vel_y_env = torch.zeros_like(self.is_zero_vel_x_env)
self.is_zero_vel_yaw_env = torch.zeros_like(self.is_zero_vel_x_env)
def __str__(self) -> str:
"""Return a string representation of the command generator."""
msg = "NormalVelocityCommand:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n"
msg += f"\tStanding probability: {self.cfg.rel_standing_envs}"
return msg
    def _resample_command(self, env_ids: Sequence[int]):
# sample velocity commands
r = torch.empty(len(env_ids), device=self.device)
# -- linear velocity - x direction
self.vel_command_b[env_ids, 0] = r.normal_(mean=self.cfg.ranges.mean_vel[0], std=self.cfg.ranges.std_vel[0])
self.vel_command_b[env_ids, 0] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0)
# -- linear velocity - y direction
self.vel_command_b[env_ids, 1] = r.normal_(mean=self.cfg.ranges.mean_vel[1], std=self.cfg.ranges.std_vel[1])
self.vel_command_b[env_ids, 1] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0)
# -- angular velocity - yaw direction
self.vel_command_b[env_ids, 2] = r.normal_(mean=self.cfg.ranges.mean_vel[2], std=self.cfg.ranges.std_vel[2])
self.vel_command_b[env_ids, 2] *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0)
        # update element-wise zero velocity command
# TODO what is zero prob ?
self.is_zero_vel_x_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[0]
self.is_zero_vel_y_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[1]
self.is_zero_vel_yaw_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.ranges.zero_prob[2]
# update standing envs
self.is_standing_env[env_ids] = r.uniform_(0.0, 1.0) <= self.cfg.rel_standing_envs
def _update_command(self):
"""Sets velocity command to zero for standing envs."""
# Enforce standing (i.e., zero velocity command) for standing envs
standing_env_ids = self.is_standing_env.nonzero(as_tuple=False).flatten() # TODO check if conversion is needed
self.vel_command_b[standing_env_ids, :] = 0.0
# Enforce zero velocity for individual elements
# TODO: check if conversion is needed
zero_vel_x_env_ids = self.is_zero_vel_x_env.nonzero(as_tuple=False).flatten()
zero_vel_y_env_ids = self.is_zero_vel_y_env.nonzero(as_tuple=False).flatten()
zero_vel_yaw_env_ids = self.is_zero_vel_yaw_env.nonzero(as_tuple=False).flatten()
self.vel_command_b[zero_vel_x_env_ids, 0] = 0.0
self.vel_command_b[zero_vel_y_env_ids, 1] = 0.0
self.vel_command_b[zero_vel_yaw_env_ids, 2] = 0.0
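# Illustrative sketch (not from the original source): each velocity component above is
# drawn from N(mean, std) and its sign is flipped with probability 0.5, which yields a
# symmetric bimodal distribution. The original code avoids aliasing because it writes
# the sampled values into a separate buffer; a standalone version needs a clone:
#
#   import torch
#
#   r = torch.empty(8)
#   v = r.normal_(mean=1.0, std=0.2).clone()  # clone: in-place ops return `r` itself
#   v *= torch.where(r.uniform_(0.0, 1.0) <= 0.5, 1.0, -1.0)  # random sign per env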
| 12,959 | Python | 46.29927 | 119 | 0.641408 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/pose_command.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for pose tracking."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import CommandTerm
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG
from omni.isaac.orbit.utils.math import combine_frame_transforms, compute_pose_error, quat_from_euler_xyz, quat_unique
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
from .commands_cfg import UniformPoseCommandCfg
class UniformPoseCommand(CommandTerm):
"""Command generator for generating pose commands uniformly.
The command generator generates poses by sampling positions uniformly within specified
regions in cartesian space. For orientation, it samples uniformly the euler angles
(roll-pitch-yaw) and converts them into quaternion representation (w, x, y, z).
The position and orientation commands are generated in the base frame of the robot, and not the
simulation world frame. This means that users need to handle the transformation from the
base frame to the simulation world frame themselves.
.. caution::
Sampling orientations uniformly is not strictly the same as sampling euler angles uniformly.
        This is because rotations live on a non-Euclidean 3D manifold (the rotation group SO(3)),
        and the mapping from euler angles to rotations is not one-to-one.
"""
cfg: UniformPoseCommandCfg
"""Configuration for the command generator."""
def __init__(self, cfg: UniformPoseCommandCfg, env: BaseEnv):
"""Initialize the command generator class.
Args:
cfg: The configuration parameters for the command generator.
env: The environment object.
"""
# initialize the base class
super().__init__(cfg, env)
# extract the robot and body index for which the command is generated
self.robot: Articulation = env.scene[cfg.asset_name]
self.body_idx = self.robot.find_bodies(cfg.body_name)[0][0]
# create buffers
# -- commands: (x, y, z, qw, qx, qy, qz) in root frame
self.pose_command_b = torch.zeros(self.num_envs, 7, device=self.device)
self.pose_command_b[:, 3] = 1.0
self.pose_command_w = torch.zeros_like(self.pose_command_b)
# -- metrics
self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device)
self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device)
def __str__(self) -> str:
msg = "UniformPoseCommand:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n"
return msg
"""
Properties
"""
@property
def command(self) -> torch.Tensor:
"""The desired pose command. Shape is (num_envs, 7).
The first three elements correspond to the position, followed by the quaternion orientation in (w, x, y, z).
"""
return self.pose_command_b
"""
Implementation specific functions.
"""
def _update_metrics(self):
# transform command from base frame to simulation world frame
self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms(
self.robot.data.root_pos_w,
self.robot.data.root_quat_w,
self.pose_command_b[:, :3],
self.pose_command_b[:, 3:],
)
# compute the error
pos_error, rot_error = compute_pose_error(
self.pose_command_w[:, :3],
self.pose_command_w[:, 3:],
self.robot.data.body_state_w[:, self.body_idx, :3],
self.robot.data.body_state_w[:, self.body_idx, 3:7],
)
self.metrics["position_error"] = torch.norm(pos_error, dim=-1)
self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1)
def _resample_command(self, env_ids: Sequence[int]):
# sample new pose targets
# -- position
r = torch.empty(len(env_ids), device=self.device)
self.pose_command_b[env_ids, 0] = r.uniform_(*self.cfg.ranges.pos_x)
self.pose_command_b[env_ids, 1] = r.uniform_(*self.cfg.ranges.pos_y)
self.pose_command_b[env_ids, 2] = r.uniform_(*self.cfg.ranges.pos_z)
# -- orientation
euler_angles = torch.zeros_like(self.pose_command_b[env_ids, :3])
euler_angles[:, 0].uniform_(*self.cfg.ranges.roll)
euler_angles[:, 1].uniform_(*self.cfg.ranges.pitch)
euler_angles[:, 2].uniform_(*self.cfg.ranges.yaw)
quat = quat_from_euler_xyz(euler_angles[:, 0], euler_angles[:, 1], euler_angles[:, 2])
# make sure the quaternion has real part as positive
self.pose_command_b[env_ids, 3:] = quat_unique(quat) if self.cfg.make_quat_unique else quat
def _update_command(self):
pass
def _set_debug_vis_impl(self, debug_vis: bool):
        # create markers if necessary for the first time
if debug_vis:
if not hasattr(self, "goal_pose_visualizer"):
marker_cfg = FRAME_MARKER_CFG.copy()
marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
# -- goal pose
marker_cfg.prim_path = "/Visuals/Command/goal_pose"
self.goal_pose_visualizer = VisualizationMarkers(marker_cfg)
# -- current body pose
marker_cfg.prim_path = "/Visuals/Command/body_pose"
self.body_pose_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.goal_pose_visualizer.set_visibility(True)
self.body_pose_visualizer.set_visibility(True)
else:
if hasattr(self, "goal_pose_visualizer"):
self.goal_pose_visualizer.set_visibility(False)
self.body_pose_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# update the markers
# -- goal pose
self.goal_pose_visualizer.visualize(self.pose_command_w[:, :3], self.pose_command_w[:, 3:])
# -- current body pose
body_pose_w = self.robot.data.body_state_w[:, self.body_idx]
self.body_pose_visualizer.visualize(body_pose_w[:, :3], body_pose_w[:, 3:7])
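# Illustrative sketch (not from the original source): `q` and `-q` encode the same
# rotation, so the `make_quat_unique` option above canonicalizes commands to the
# representative with a non-negative real part. A minimal version of that mapping:
#
#   import torch
#
#   def quat_unique_sketch(q: torch.Tensor) -> torch.Tensor:
#       """Return q with w >= 0; q has shape (..., 4) in (w, x, y, z) order."""
#       return torch.where(q[..., 0:1] < 0, -q, q)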
| 6,561 | Python | 40.796178 | 118 | 0.641366 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/null_command.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generator that does nothing."""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING
from omni.isaac.orbit.managers import CommandTerm
if TYPE_CHECKING:
from .commands_cfg import NullCommandCfg
class NullCommand(CommandTerm):
"""Command generator that does nothing.
This command generator does not generate any commands. It is used for environments that do not
require any commands.
"""
cfg: NullCommandCfg
"""Configuration for the command generator."""
def __str__(self) -> str:
msg = "NullCommand:\n"
msg += "\tCommand dimension: N/A\n"
msg += f"\tResampling time range: {self.cfg.resampling_time_range}"
return msg
"""
Properties
"""
@property
def command(self):
"""Null command.
Raises:
RuntimeError: No command is generated. Always raises this error.
"""
raise RuntimeError("NullCommandTerm does not generate any commands.")
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
return {}
def compute(self, dt: float):
pass
"""
Implementation specific functions.
"""
def _update_metrics(self):
pass
def _resample_command(self, env_ids: Sequence[int]):
pass
def _update_command(self):
pass
| 1,574 | Python | 21.5 | 98 | 0.640407 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/mdp/commands/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Various command terms that can be used in the environment."""
from .commands_cfg import (
NormalVelocityCommandCfg,
NullCommandCfg,
TerrainBasedPose2dCommandCfg,
UniformPose2dCommandCfg,
UniformPoseCommandCfg,
UniformVelocityCommandCfg,
)
from .null_command import NullCommand
from .pose_2d_command import TerrainBasedPose2dCommand, UniformPose2dCommand
from .pose_command import UniformPoseCommand
from .velocity_command import NormalVelocityCommand, UniformVelocityCommand
| 626 | Python | 30.349998 | 76 | 0.801917 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/observation_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Observation manager for computing observation signals for a given world."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import ObservationGroupCfg, ObservationTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
class ObservationManager(ManagerBase):
"""Manager for computing observation signals for a given world.
Observations are organized into groups based on their intended usage. This allows having different observation
groups for different types of learning such as asymmetric actor-critic and student-teacher training. Each
group contains observation terms which contain information about the observation function to call, the noise
corruption model to use, and the sensor to retrieve data from.
Each observation group should inherit from the :class:`ObservationGroupCfg` class. Within each group, each
observation term should instantiate the :class:`ObservationTermCfg` class.
"""
def __init__(self, cfg: object, env: BaseEnv):
"""Initialize observation manager.
Args:
cfg: The configuration object or dictionary (``dict[str, ObservationGroupCfg]``).
env: The environment instance.
"""
super().__init__(cfg, env)
# compute combined vector for obs group
self._group_obs_dim: dict[str, tuple[int, ...]] = dict()
for group_name, group_term_dims in self._group_obs_term_dim.items():
term_dims = [torch.tensor(dims, device="cpu") for dims in group_term_dims]
self._group_obs_dim[group_name] = tuple(torch.sum(torch.stack(term_dims, dim=0), dim=0).tolist())
def __str__(self) -> str:
"""Returns: A string representation for the observation manager."""
msg = f"<ObservationManager> contains {len(self._group_obs_term_names)} groups.\n"
# add info for each group
for group_name, group_dim in self._group_obs_dim.items():
# create table for term information
table = PrettyTable()
table.title = f"Active Observation Terms in Group: '{group_name}' (shape: {group_dim})"
table.field_names = ["Index", "Name", "Shape"]
# set alignment of table columns
table.align["Name"] = "l"
# add info for each term
obs_terms = zip(
self._group_obs_term_names[group_name],
self._group_obs_term_dim[group_name],
)
for index, (name, dims) in enumerate(obs_terms):
# resolve inputs to simplify prints
tab_dims = tuple(dims)
# add row
table.add_row([index, name, tab_dims])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> dict[str, list[str]]:
"""Name of active observation terms in each group."""
return self._group_obs_term_names
@property
def group_obs_dim(self) -> dict[str, tuple[int, ...]]:
"""Shape of observation tensor in each group."""
return self._group_obs_dim
@property
def group_obs_term_dim(self) -> dict[str, list[tuple[int, ...]]]:
"""Shape of observation tensor for each term in each group."""
return self._group_obs_term_dim
@property
def group_obs_concatenate(self) -> dict[str, bool]:
"""Whether the observation terms are concatenated in each group."""
return self._group_obs_concatenate
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
# call all terms that are classes
for group_cfg in self._group_obs_class_term_cfgs.values():
for term_cfg in group_cfg:
term_cfg.func.reset(env_ids=env_ids)
# nothing to log here
return {}
def compute(self) -> dict[str, torch.Tensor | dict[str, torch.Tensor]]:
"""Compute the observations per group for all groups.
The method computes the observations for all the groups handled by the observation manager.
Please check the :meth:`compute_group` on the processing of observations per group.
Returns:
A dictionary with keys as the group names and values as the computed observations.
"""
# create a buffer for storing obs from all the groups
obs_buffer = dict()
        # iterate over all the groups and compute their observations
for group_name in self._group_obs_term_names:
obs_buffer[group_name] = self.compute_group(group_name)
        # return a dict with the observations of all groups
return obs_buffer
def compute_group(self, group_name: str) -> torch.Tensor | dict[str, torch.Tensor]:
"""Computes the observations for a given group.
The observations for a given group are computed by calling the registered functions for each
term in the group. The functions are called in the order of the terms in the group. The functions
are expected to return a tensor with shape (num_envs, ...).
If a corruption/noise model is registered for a term, the function is called to corrupt
the observation. The corruption function is expected to return a tensor with the same
shape as the observation. The observations are clipped and scaled as per the configuration
settings.
The operations are performed in the order: compute, add corruption/noise, clip, scale.
By default, no scaling or clipping is applied.
Args:
            group_name: The name of the group for which to compute the observations.
Returns:
Depending on the group's configuration, the tensors for individual observation terms are
concatenated along the last dimension into a single tensor. Otherwise, they are returned as
a dictionary with keys corresponding to the term's name.
Raises:
ValueError: If input ``group_name`` is not a valid group handled by the manager.
"""
        # check if the group name is valid
if group_name not in self._group_obs_term_names:
raise ValueError(
f"Unable to find the group '{group_name}' in the observation manager."
f" Available groups are: {list(self._group_obs_term_names.keys())}"
)
# iterate over all the terms in each group
group_term_names = self._group_obs_term_names[group_name]
# buffer to store obs per group
group_obs = dict.fromkeys(group_term_names, None)
# read attributes for each term
obs_terms = zip(group_term_names, self._group_obs_term_cfgs[group_name])
# evaluate terms: compute, add noise, clip, scale.
for name, term_cfg in obs_terms:
# compute term's value
obs: torch.Tensor = term_cfg.func(self._env, **term_cfg.params).clone()
# apply post-processing
if term_cfg.noise:
obs = term_cfg.noise.func(obs, term_cfg.noise)
if term_cfg.clip:
obs = obs.clip_(min=term_cfg.clip[0], max=term_cfg.clip[1])
if term_cfg.scale:
obs = obs.mul_(term_cfg.scale)
# TODO: Introduce delay and filtering models.
# Ref: https://robosuite.ai/docs/modules/sensors.html#observables
# add value to list
group_obs[name] = obs
# concatenate all observations in the group together
if self._group_obs_concatenate[group_name]:
return torch.cat(list(group_obs.values()), dim=-1)
else:
return group_obs
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of observation terms functions."""
# create buffers to store information for each observation group
# TODO: Make this more convenient by using data structures.
self._group_obs_term_names: dict[str, list[str]] = dict()
        self._group_obs_term_dim: dict[str, list[tuple[int, ...]]] = dict()
self._group_obs_term_cfgs: dict[str, list[ObservationTermCfg]] = dict()
self._group_obs_class_term_cfgs: dict[str, list[ObservationTermCfg]] = dict()
self._group_obs_concatenate: dict[str, bool] = dict()
# check if config is dict already
if isinstance(self.cfg, dict):
group_cfg_items = self.cfg.items()
else:
group_cfg_items = self.cfg.__dict__.items()
# iterate over all the groups
for group_name, group_cfg in group_cfg_items:
# check for non config
if group_cfg is None:
continue
            # check if the group is a valid observation group
if not isinstance(group_cfg, ObservationGroupCfg):
raise TypeError(
f"Observation group '{group_name}' is not of type 'ObservationGroupCfg'."
f" Received: '{type(group_cfg)}'."
)
# initialize list for the group settings
self._group_obs_term_names[group_name] = list()
self._group_obs_term_dim[group_name] = list()
self._group_obs_term_cfgs[group_name] = list()
self._group_obs_class_term_cfgs[group_name] = list()
# read common config for the group
self._group_obs_concatenate[group_name] = group_cfg.concatenate_terms
# check if config is dict already
if isinstance(group_cfg, dict):
group_cfg_items = group_cfg.items()
else:
group_cfg_items = group_cfg.__dict__.items()
# iterate over all the terms in each group
            for term_name, term_cfg in group_cfg_items:
# skip non-obs settings
if term_name in ["enable_corruption", "concatenate_terms"]:
continue
# check for non config
if term_cfg is None:
continue
if not isinstance(term_cfg, ObservationTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type ObservationTermCfg."
f" Received: '{type(term_cfg)}'."
)
# resolve common terms in the config
self._resolve_common_term_cfg(f"{group_name}/{term_name}", term_cfg, min_argc=1)
# check noise settings
if not group_cfg.enable_corruption:
term_cfg.noise = None
                # add term config to list
self._group_obs_term_names[group_name].append(term_name)
self._group_obs_term_cfgs[group_name].append(term_cfg)
# call function the first time to fill up dimensions
obs_dims = tuple(term_cfg.func(self._env, **term_cfg.params).shape[1:])
self._group_obs_term_dim[group_name].append(obs_dims)
# add term in a separate list if term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._group_obs_class_term_cfgs[group_name].append(term_cfg)
                    # call reset (in case the above call to get obs dims changed the state)
term_cfg.func.reset()
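# Illustrative sketch (not from the original source): per-term post-processing in
# `compute_group` applies, in order, noise -> clip -> scale. An equivalent standalone
# pipeline for a single term, assuming an additive gaussian noise model:
#
#   import torch
#
#   obs = torch.randn(16, 3)                  # term_cfg.func(env, **params)
#   obs = obs + 0.01 * torch.randn_like(obs)  # term_cfg.noise (assumed additive)
#   obs = obs.clip_(min=-1.0, max=1.0)        # term_cfg.clip
#   obs = obs.mul_(2.0)                       # term_cfg.scale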
| 11,820 | Python | 44.291188 | 114 | 0.609645 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/manager_base.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import copy
import inspect
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any
import carb
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.utils import string_to_callable
from .manager_term_cfg import ManagerTermBaseCfg
from .scene_entity_cfg import SceneEntityCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
class ManagerTermBase(ABC):
"""Base class for manager terms.
Manager term implementations can be functions or classes. If the term is a class, it should
inherit from this base class and implement the required methods.
Each manager is implemented as a class that inherits from the :class:`ManagerBase` class. Each manager
class should also have a corresponding configuration class that defines the configuration terms for the
    manager. Each term configuration should instantiate the :class:`ManagerTermBaseCfg` class or its subclass.
Example pseudo-code for creating a manager:
.. code-block:: python
from omni.isaac.orbit.utils import configclass
        from omni.isaac.orbit.managers import ManagerBase, ManagerTermBaseCfg
@configclass
class MyManagerCfg:
my_term_1: ManagerTermBaseCfg = ManagerTermBaseCfg(...)
my_term_2: ManagerTermBaseCfg = ManagerTermBaseCfg(...)
my_term_3: ManagerTermBaseCfg = ManagerTermBaseCfg(...)
# define manager instance
        my_manager = ManagerBase(cfg=MyManagerCfg(), env=env)
"""
def __init__(self, cfg: ManagerTermBaseCfg, env: BaseEnv):
"""Initialize the manager term.
Args:
cfg: The configuration object.
env: The environment instance.
"""
# store the inputs
self.cfg = cfg
self._env = env
"""
Properties.
"""
@property
def num_envs(self) -> int:
"""Number of environments."""
return self._env.num_envs
@property
def device(self) -> str:
"""Device on which to perform computations."""
return self._env.device
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> None:
"""Resets the manager term.
Args:
env_ids: The environment ids. Defaults to None, in which case
all environments are considered.
"""
pass
def __call__(self, *args) -> Any:
"""Returns the value of the term required by the manager.
In case of a class implementation, this function is called by the manager
to get the value of the term. The arguments passed to this function are
the ones specified in the term configuration (see :attr:`ManagerTermBaseCfg.params`).
.. attention::
To be consistent with memory-less implementation of terms with functions, it is
recommended to ensure that the returned mutable quantities are cloned before
returning them. For instance, if the term returns a tensor, it is recommended
to ensure that the returned tensor is a clone of the original tensor. This prevents
the manager from storing references to the tensors and altering the original tensors.
Args:
*args: Variable length argument list.
Returns:
The value of the term.
"""
raise NotImplementedError
class ManagerBase(ABC):
"""Base class for all managers."""
def __init__(self, cfg: object, env: BaseEnv):
"""Initialize the manager.
Args:
cfg: The configuration object.
env: The environment instance.
"""
# store the inputs
self.cfg = copy.deepcopy(cfg)
self._env = env
# parse config to create terms information
self._prepare_terms()
"""
Properties.
"""
@property
def num_envs(self) -> int:
"""Number of environments."""
return self._env.num_envs
@property
def device(self) -> str:
"""Device on which to perform computations."""
return self._env.device
@property
@abstractmethod
def active_terms(self) -> list[str] | dict[str, list[str]]:
"""Name of active terms."""
raise NotImplementedError
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
"""Resets the manager and returns logging information for the current time-step.
Args:
env_ids: The environment ids for which to log data.
                Defaults to None, which logs data for all environments.
Returns:
Dictionary containing the logging information.
"""
return {}
def find_terms(self, name_keys: str | Sequence[str]) -> list[str]:
"""Find terms in the manager based on the names.
This function searches the manager for terms based on the names. The names can be
specified as regular expressions or a list of regular expressions. The search is
performed on the active terms in the manager.
        Please check the :meth:`omni.isaac.orbit.utils.string.resolve_matching_names` function for more
information on the name matching.
Args:
name_keys: A regular expression or a list of regular expressions to match the term names.
Returns:
A list of term names that match the input keys.
"""
# resolve search keys
if isinstance(self.active_terms, dict):
list_of_strings = []
for names in self.active_terms.values():
list_of_strings.extend(names)
else:
list_of_strings = self.active_terms
# return the matching names
return string_utils.resolve_matching_names(name_keys, list_of_strings)[1]
"""
Implementation specific.
"""
@abstractmethod
def _prepare_terms(self):
"""Prepare terms information from the configuration object."""
raise NotImplementedError
"""
Helper functions.
"""
def _resolve_common_term_cfg(self, term_name: str, term_cfg: ManagerTermBaseCfg, min_argc: int = 1):
"""Resolve common term configuration.
Usually, called by the :meth:`_prepare_terms` method to resolve common term configuration.
Note:
By default, all term functions are expected to have at least one argument, which is the
environment object. Some other managers may expect functions to take more arguments, for
instance, the environment indices as the second argument. In such cases, the
``min_argc`` argument can be used to specify the minimum number of arguments
required by the term function to be called correctly by the manager.
Args:
term_name: The name of the term.
term_cfg: The term configuration.
min_argc: The minimum number of arguments required by the term function to be called correctly
by the manager.
Raises:
TypeError: If the term configuration is not of type :class:`ManagerTermBaseCfg`.
ValueError: If the scene entity defined in the term configuration does not exist.
AttributeError: If the term function is not callable.
ValueError: If the term function's arguments are not matched by the parameters.
"""
# check if the term is a valid term config
if not isinstance(term_cfg, ManagerTermBaseCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type ManagerTermBaseCfg."
f" Received: '{type(term_cfg)}'."
)
# iterate over all the entities and parse the joint and body names
for key, value in term_cfg.params.items():
# deal with string
if isinstance(value, SceneEntityCfg):
# load the entity
try:
value.resolve(self._env.scene)
except ValueError as e:
raise ValueError(f"Error while parsing '{term_name}:{key}'. {e}")
# log the entity for checking later
msg = f"[{term_cfg.__class__.__name__}:{term_name}] Found entity '{value.name}'."
if value.joint_ids is not None:
msg += f"\n\tJoint names: {value.joint_names} [{value.joint_ids}]"
if value.body_ids is not None:
msg += f"\n\tBody names: {value.body_names} [{value.body_ids}]"
# print the information
carb.log_info(msg)
# store the entity
term_cfg.params[key] = value
# get the corresponding function or functional class
if isinstance(term_cfg.func, str):
term_cfg.func = string_to_callable(term_cfg.func)
# initialize the term if it is a class
if inspect.isclass(term_cfg.func):
if not issubclass(term_cfg.func, ManagerTermBase):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type ManagerTermBase."
f" Received: '{type(term_cfg.func)}'."
)
term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env)
# check if function is callable
if not callable(term_cfg.func):
raise AttributeError(f"The term '{term_name}' is not callable. Received: {term_cfg.func}")
# check if term's arguments are matched by params
term_params = list(term_cfg.params.keys())
args = inspect.signature(term_cfg.func).parameters
args_with_defaults = [arg for arg in args if args[arg].default is not inspect.Parameter.empty]
args_without_defaults = [arg for arg in args if args[arg].default is inspect.Parameter.empty]
args = args_without_defaults + args_with_defaults
        # ignore the first `min_argc` arguments (e.g. the env and env_ids)
# Think: Check for cases when kwargs are set inside the function?
if len(args) > min_argc:
if set(args[min_argc:]) != set(term_params + args_with_defaults):
raise ValueError(
f"The term '{term_name}' expects mandatory parameters: {args_without_defaults[min_argc:]}"
f" and optional parameters: {args_with_defaults}, but received: {term_params}."
)
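# Illustrative sketch (not from the original source): the argument check above compares
# a term function's signature against the configured params, ignoring the first
# `min_argc` positional arguments. Standalone reproduction with a hypothetical term:
#
#   import inspect
#
#   def my_term(env, env_ids, scale, offset=0.0):
#       ...
#
#   params = inspect.signature(my_term).parameters
#   required = [n for n, p in params.items() if p.default is inspect.Parameter.empty]
#   optional = [n for n, p in params.items() if p.default is not inspect.Parameter.empty]
#   # with min_argc=2, the term config must supply {"scale"}; "offset" may be omitted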
| 10,629 | Python | 36.167832 | 110 | 0.621884 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for environment managers.
The managers are used to handle various aspects of the environment such as randomization events, curriculum,
and observations. Each manager implements a specific functionality for the environment. The managers are
designed to be modular and can be easily extended to support new functionality.
"""
from .action_manager import ActionManager, ActionTerm
from .command_manager import CommandManager, CommandTerm
from .curriculum_manager import CurriculumManager
from .event_manager import EventManager, RandomizationManager
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import (
ActionTermCfg,
CommandTermCfg,
CurriculumTermCfg,
EventTermCfg,
ManagerTermBaseCfg,
ObservationGroupCfg,
ObservationTermCfg,
RandomizationTermCfg,
RewardTermCfg,
TerminationTermCfg,
)
from .observation_manager import ObservationManager
from .reward_manager import RewardManager
from .scene_entity_cfg import SceneEntityCfg
from .termination_manager import TerminationManager
| 1,188 | Python | 33.970587 | 108 | 0.808081 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/event_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Event manager for orchestrating operations based on different simulation events."""
from __future__ import annotations
import torch
import warnings
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
import carb
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import EventTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class EventManager(ManagerBase):
"""Manager for orchestrating operations based on different simulation events.
The event manager applies operations to the environment based on different simulation events. For example,
    changing the masses of objects or their friction coefficients during initialization/reset, or applying random
pushes to the robot at a fixed interval of steps. The user can specify several modes of events to fine-tune the
behavior based on when to apply the event.
The event terms are parsed from a config class containing the manager's settings and each term's
parameters. Each event term should instantiate the :class:`EventTermCfg` class.
Event terms can be grouped by their mode. The mode is a user-defined string that specifies when
the event term should be applied. This provides the user complete control over when event
terms should be applied.
For a typical training process, you may want to apply events in the following modes:
- "startup": Event is applied once at the beginning of the training.
- "reset": Event is applied at every reset.
- "interval": Event is applied at pre-specified intervals of time.
However, you can also define your own modes and use them in the training process as you see fit.
For this you will need to add the triggering of that mode in the environment implementation as well.
.. note::
        The ``"interval"`` mode is the only mode whose triggering is handled directly by the manager
        itself. The other modes are handled by the environment implementation.
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the event manager.
Args:
cfg: A configuration object or dictionary (``dict[str, EventTermCfg]``).
env: An environment object.
"""
super().__init__(cfg, env)
def __str__(self) -> str:
"""Returns: A string representation for event manager."""
msg = f"<EventManager> contains {len(self._mode_term_names)} active terms.\n"
# add info on each mode
for mode in self._mode_term_names:
# create table for term information
table = PrettyTable()
table.title = f"Active Event Terms in Mode: '{mode}'"
# add table headers based on mode
if mode == "interval":
table.field_names = ["Index", "Name", "Interval time range (s)"]
table.align["Name"] = "l"
for index, (name, cfg) in enumerate(zip(self._mode_term_names[mode], self._mode_term_cfgs[mode])):
table.add_row([index, name, cfg.interval_range_s])
else:
table.field_names = ["Index", "Name"]
table.align["Name"] = "l"
for index, name in enumerate(self._mode_term_names[mode]):
table.add_row([index, name])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> dict[str, list[str]]:
"""Name of active event terms.
The keys are the modes of event and the values are the names of the event terms.
"""
return self._mode_term_names
@property
def available_modes(self) -> list[str]:
"""Modes of events."""
return list(self._mode_term_names.keys())
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
# call all terms that are classes
for mode_cfg in self._mode_class_term_cfgs.values():
for term_cfg in mode_cfg:
term_cfg.func.reset(env_ids=env_ids)
# nothing to log here
return {}
def apply(self, mode: str, env_ids: Sequence[int] | None = None, dt: float | None = None):
"""Calls each event term in the specified mode.
Note:
For interval mode, the time step of the environment is used to determine if the event
should be applied.
Args:
mode: The mode of event.
env_ids: The indices of the environments to apply the event to.
Defaults to None, in which case the event is applied to all environments.
dt: The time step of the environment. This is only used for the "interval" mode.
Defaults to None to simplify the call for other modes.
Raises:
ValueError: If the mode is ``"interval"`` and the time step is not provided.
"""
# check if mode is valid
if mode not in self._mode_term_names:
carb.log_warn(f"Event mode '{mode}' is not defined. Skipping event.")
return
# iterate over all the event terms
for index, term_cfg in enumerate(self._mode_term_cfgs[mode]):
# resample interval if needed
if mode == "interval":
if dt is None:
raise ValueError(
f"Event mode '{mode}' requires the time step of the environment"
" to be passed to the event manager."
)
# extract time left for this term
time_left = self._interval_mode_time_left[index]
# update the time left for each environment
time_left -= dt
# check if the interval has passed
env_ids = (time_left <= 0.0).nonzero().flatten()
if len(env_ids) > 0:
lower, upper = term_cfg.interval_range_s
time_left[env_ids] = torch.rand(len(env_ids), device=self.device) * (upper - lower) + lower
# call the event term
term_cfg.func(self._env, env_ids, **term_cfg.params)
"""
Operations - Term settings.
"""
def set_term_cfg(self, term_name: str, cfg: EventTermCfg):
"""Sets the configuration of the specified term into the manager.
The method finds the term by name by searching through all the modes.
It then updates the configuration of the term with the first matching name.
Args:
term_name: The name of the event term.
cfg: The configuration for the event term.
Raises:
ValueError: If the term name is not found.
"""
term_found = False
for mode, terms in self._mode_term_names.items():
if term_name in terms:
self._mode_term_cfgs[mode][terms.index(term_name)] = cfg
term_found = True
break
if not term_found:
raise ValueError(f"Event term '{term_name}' not found.")
def get_term_cfg(self, term_name: str) -> EventTermCfg:
"""Gets the configuration for the specified term.
The method finds the term by name by searching through all the modes.
It then returns the configuration of the term with the first matching name.
Args:
term_name: The name of the event term.
Returns:
The configuration of the event term.
Raises:
ValueError: If the term name is not found.
"""
for mode, terms in self._mode_term_names.items():
if term_name in terms:
return self._mode_term_cfgs[mode][terms.index(term_name)]
raise ValueError(f"Event term '{term_name}' not found.")
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of event functions."""
        # parse the event terms and store their information
self._mode_term_names: dict[str, list[str]] = dict()
self._mode_term_cfgs: dict[str, list[EventTermCfg]] = dict()
self._mode_class_term_cfgs: dict[str, list[EventTermCfg]] = dict()
# buffer to store the time left for each environment for "interval" mode
self._interval_mode_time_left: list[torch.Tensor] = list()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check for valid config type
if not isinstance(term_cfg, EventTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type EventTermCfg."
f" Received: '{type(term_cfg)}'."
)
# resolve common parameters
self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2)
# check if mode is a new mode
if term_cfg.mode not in self._mode_term_names:
# add new mode
self._mode_term_names[term_cfg.mode] = list()
self._mode_term_cfgs[term_cfg.mode] = list()
self._mode_class_term_cfgs[term_cfg.mode] = list()
# add term name and parameters
self._mode_term_names[term_cfg.mode].append(term_name)
self._mode_term_cfgs[term_cfg.mode].append(term_cfg)
# check if the term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._mode_class_term_cfgs[term_cfg.mode].append(term_cfg)
# resolve the mode of the events
if term_cfg.mode == "interval":
if term_cfg.interval_range_s is None:
raise ValueError(
f"Event term '{term_name}' has mode 'interval' but 'interval_range_s' is not specified."
)
# sample the time left for each environment
lower, upper = term_cfg.interval_range_s
time_left = torch.rand(self.num_envs, device=self.device) * (upper - lower) + lower
self._interval_mode_time_left.append(time_left)
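# Illustrative sketch (not from the original source): "interval" terms keep a per-env
# countdown; when it crosses zero the event fires for those envs and the timer is
# re-drawn uniformly from `interval_range_s`. Standalone version with example values:
#
#   import torch
#
#   lower, upper = 2.0, 5.0  # interval_range_s (example values)
#   time_left = torch.rand(8) * (upper - lower) + lower
#   dt = 0.02                # environment step size (example value)
#   time_left -= dt
#   fired = (time_left <= 0.0).nonzero().flatten()
#   time_left[fired] = torch.rand(len(fired)) * (upper - lower) + lower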
class RandomizationManager(EventManager):
"""Manager for applying event specific operations to different elements in the scene.
.. deprecated:: v0.4.0
As the RandomizationManager also handles events such as resetting the environment, the class has been
renamed to EventManager as it is more general purpose. The RandomizationManager will be removed in v0.4.0.
"""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the randomization manager.
Args:
cfg: A configuration object or dictionary (``dict[str, EventTermCfg]``).
env: An environment object.
"""
dep_msg = "The class 'RandomizationManager' will be removed in v0.4.0. Please use 'EventManager' instead."
warnings.warn(dep_msg, DeprecationWarning)
carb.log_error(dep_msg)
super().__init__(cfg, env)
def randomize(self, mode: str, env_ids: Sequence[int] | None = None, dt: float | None = None):
"""Randomize the environment.
.. deprecated:: v0.4.0
This method will be removed in v0.4.0. Please use the method :meth:`EventManager.apply`
instead.
"""
dep_msg = (
"The class 'RandomizationManager' including its method 'randomize' will be removed in v0.4.0. Please use "
"the class 'EventManager' with the method 'apply' instead."
)
warnings.warn(dep_msg, DeprecationWarning)
carb.log_error(dep_msg)
self.apply(mode, env_ids, dt)
| 12,265 | Python | 39.481848 | 118 | 0.603261 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/command_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Command manager for generating and updating commands."""
from __future__ import annotations
import inspect
import torch
import weakref
from abc import abstractmethod
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
import omni.kit.app
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import CommandTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class CommandTerm(ManagerTermBase):
"""The base class for implementing a command term.
A command term is used to generate commands for goal-conditioned tasks. For example,
in the case of a goal-conditioned navigation task, the command term can be used to
generate a target position for the robot to navigate to.
It implements a resampling mechanism that allows the command to be resampled at a fixed
frequency. The resampling frequency can be specified in the configuration object.
Additionally, it is possible to assign a visualization function to the command term
that can be used to visualize the command in the simulator.
"""
def __init__(self, cfg: CommandTermCfg, env: RLTaskEnv):
"""Initialize the command generator class.
Args:
cfg: The configuration parameters for the command generator.
env: The environment object.
"""
super().__init__(cfg, env)
# create buffers to store the command
# -- metrics that can be used for logging
self.metrics = dict()
# -- time left before resampling
self.time_left = torch.zeros(self.num_envs, device=self.device)
# -- counter for the number of times the command has been resampled within the current episode
self.command_counter = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# add handle for debug visualization (this is set to a valid handle inside set_debug_vis)
self._debug_vis_handle = None
# set initial state of debug visualization
self.set_debug_vis(self.cfg.debug_vis)
def __del__(self):
"""Unsubscribe from the callbacks."""
if self._debug_vis_handle:
self._debug_vis_handle.unsubscribe()
self._debug_vis_handle = None
"""
Properties
"""
@property
@abstractmethod
def command(self) -> torch.Tensor:
"""The command tensor. Shape is (num_envs, command_dim)."""
raise NotImplementedError
@property
def has_debug_vis_implementation(self) -> bool:
"""Whether the command generator has a debug visualization implemented."""
# check if function raises NotImplementedError
source_code = inspect.getsource(self._set_debug_vis_impl)
return "NotImplementedError" not in source_code
"""
Operations.
"""
def set_debug_vis(self, debug_vis: bool) -> bool:
"""Sets whether to visualize the command data.
Args:
debug_vis: Whether to visualize the command data.
Returns:
Whether the debug visualization was successfully set. False if the command
generator does not support debug visualization.
"""
# check if debug visualization is supported
if not self.has_debug_vis_implementation:
return False
# toggle debug visualization objects
self._set_debug_vis_impl(debug_vis)
# toggle debug visualization handles
if debug_vis:
# create a subscriber for the post update event if it doesn't exist
if self._debug_vis_handle is None:
app_interface = omni.kit.app.get_app_interface()
self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop(
lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event)
)
else:
# remove the subscriber if it exists
if self._debug_vis_handle is not None:
self._debug_vis_handle.unsubscribe()
self._debug_vis_handle = None
# return success
return True
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
"""Reset the command generator and log metrics.
This function resets the command counter and resamples the command. It should be called
at the beginning of each episode.
Args:
env_ids: The list of environment IDs to reset. Defaults to None.
Returns:
A dictionary containing the information to log under the "{name}" key.
"""
# resolve the environment IDs
if env_ids is None:
env_ids = slice(None)
# set the command counter to zero
self.command_counter[env_ids] = 0
# resample the command
self._resample(env_ids)
# add logging metrics
extras = {}
for metric_name, metric_value in self.metrics.items():
# compute the mean metric value
extras[metric_name] = torch.mean(metric_value[env_ids]).item()
# reset the metric value
metric_value[env_ids] = 0.0
return extras
def compute(self, dt: float):
"""Compute the command.
Args:
dt: The time step passed since the last call to compute.
"""
# update the metrics based on current state
self._update_metrics()
# reduce the time left before resampling
self.time_left -= dt
# resample the command if necessary
resample_env_ids = (self.time_left <= 0.0).nonzero().flatten()
if len(resample_env_ids) > 0:
self._resample(resample_env_ids)
# update the command
self._update_command()
"""
Helper functions.
"""
def _resample(self, env_ids: Sequence[int]):
"""Resample the command.
This function resamples the command and time for which the command is applied for the
specified environment indices.
Args:
env_ids: The list of environment IDs to resample.
"""
# resample the time left before resampling
self.time_left[env_ids] = self.time_left[env_ids].uniform_(*self.cfg.resampling_time_range)
# increment the command counter
self.command_counter[env_ids] += 1
# resample the command
self._resample_command(env_ids)
"""
Implementation specific functions.
"""
@abstractmethod
def _update_metrics(self):
"""Update the metrics based on the current state."""
raise NotImplementedError
@abstractmethod
def _resample_command(self, env_ids: Sequence[int]):
"""Resample the command for the specified environments."""
raise NotImplementedError
@abstractmethod
def _update_command(self):
"""Update the command based on the current state."""
raise NotImplementedError
def _set_debug_vis_impl(self, debug_vis: bool):
"""Set debug visualization into visualization objects.
This function is responsible for creating the visualization objects if they don't exist
and input ``debug_vis`` is True. If the visualization objects exist, the function should
set their visibility into the stage.
"""
raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.")
def _debug_vis_callback(self, event):
"""Callback for debug visualization.
This function calls the visualization objects and sets the data to visualize into them.
"""
raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.")
class CommandManager(ManagerBase):
"""Manager for generating commands.
The command manager is used to generate commands for an agent to execute. It makes it convenient to switch
between different command generation strategies within the same environment. For instance, in an environment
consisting of a quadrupedal robot, the command to it could be a velocity command or position command.
By keeping the command generation logic separate from the environment, it is easy to switch between different
command generation strategies.
The command terms are implemented as classes that inherit from the :class:`CommandTerm` class.
Each command generator term should also have a corresponding configuration class that inherits from the
:class:`CommandTermCfg` class.
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the command manager.
Args:
cfg: The configuration object or dictionary (``dict[str, CommandTermCfg]``).
env: The environment instance.
"""
super().__init__(cfg, env)
# store the commands
self._commands = dict()
self.cfg.debug_vis = False
for term in self._terms.values():
self.cfg.debug_vis |= term.cfg.debug_vis
def __str__(self) -> str:
"""Returns: A string representation for the command manager."""
msg = f"<CommandManager> contains {len(self._terms.values())} active terms.\n"
# create table for term information
table = PrettyTable()
table.title = "Active Command Terms"
table.field_names = ["Index", "Name", "Type"]
# set alignment of table columns
table.align["Name"] = "l"
# add info on each term
for index, (name, term) in enumerate(self._terms.items()):
table.add_row([index, name, term.__class__.__name__])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> list[str]:
"""Name of active command terms."""
return list(self._terms.keys())
@property
def has_debug_vis_implementation(self) -> bool:
"""Whether the command terms have debug visualization implemented."""
# check if function raises NotImplementedError
has_debug_vis = False
for term in self._terms.values():
has_debug_vis |= term.has_debug_vis_implementation
return has_debug_vis
"""
Operations.
"""
    def set_debug_vis(self, debug_vis: bool) -> bool:
        """Sets whether to visualize the command data.
        Args:
            debug_vis: Whether to visualize the command data.
        Returns:
            Whether the debug visualization was successfully set. False if none of the
            command terms support debug visualization.
        """
        # aggregate success across the terms so the return value matches the signature
        success = False
        for term in self._terms.values():
            success |= term.set_debug_vis(debug_vis)
        return success
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]:
"""Reset the command terms and log their metrics.
This function resets the command counter and resamples the command for each term. It should be called
at the beginning of each episode.
Args:
env_ids: The list of environment IDs to reset. Defaults to None.
Returns:
A dictionary containing the information to log under the "Metrics/{term_name}/{metric_name}" key.
"""
# resolve environment ids
if env_ids is None:
env_ids = slice(None)
# store information
extras = {}
for name, term in self._terms.items():
# reset the command term
metrics = term.reset(env_ids=env_ids)
# compute the mean metric value
for metric_name, metric_value in metrics.items():
extras[f"Metrics/{name}/{metric_name}"] = metric_value
# return logged information
return extras
def compute(self, dt: float):
"""Updates the commands.
This function calls each command term managed by the class.
Args:
dt: The time-step interval of the environment.
"""
# iterate over all the command terms
for term in self._terms.values():
# compute term's value
term.compute(dt)
def get_command(self, name: str) -> torch.Tensor:
"""Returns the command for the specified command term.
Args:
name: The name of the command term.
Returns:
The command tensor of the specified command term.
"""
return self._terms[name].command
def get_term(self, name: str) -> CommandTerm:
"""Returns the command term with the specified name.
Args:
name: The name of the command term.
Returns:
The command term with the specified name.
"""
return self._terms[name]
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of command terms."""
# parse command terms from the config
self._terms: dict[str, CommandTerm] = dict()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check for valid config type
if not isinstance(term_cfg, CommandTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type CommandTermCfg."
f" Received: '{type(term_cfg)}'."
)
# create the action term
term = term_cfg.class_type(term_cfg, self._env)
# add class to dict
self._terms[term_name] = term
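# --- Illustrative usage sketch (hedged; not part of the original module) ---
# Demonstrates the manager life-cycle documented above. `cfg` is assumed to be a
# configclass (or dict) whose entries are CommandTermCfg instances and `env` an
# RLTaskEnv; both are hypothetical placeholders.
def _command_manager_usage_example(cfg: object, env: RLTaskEnv) -> torch.Tensor:
    manager = CommandManager(cfg, env)
    manager.reset()  # resamples every term and returns logged metrics
    manager.compute(dt=0.02)  # advances the resampling clocks by one step
    # fetch the command tensor of the first active term, shape (num_envs, command_dim)
    return manager.get_command(manager.active_terms[0])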
| 14,055 | Python | 34.405541 | 113 | 0.623764 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/scene_entity_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration terms for different managers."""
from dataclasses import MISSING
from omni.isaac.orbit.assets import Articulation, RigidObject
from omni.isaac.orbit.scene import InteractiveScene
from omni.isaac.orbit.utils import configclass
@configclass
class SceneEntityCfg:
"""Configuration for a scene entity that is used by the manager's term.
This class is used to specify the name of the scene entity that is queried from the
:class:`InteractiveScene` and passed to the manager's term function.
"""
name: str = MISSING
"""The name of the scene entity.
This is the name defined in the scene configuration file. See the :class:`InteractiveSceneCfg`
class for more details.
"""
joint_names: str | list[str] | None = None
"""The names of the joints from the scene entity. Defaults to None.
The names can be either joint names or a regular expression matching the joint names.
These are converted to joint indices on initialization of the manager and passed to the term
function as a list of joint indices under :attr:`joint_ids`.
"""
joint_ids: list[int] | slice = slice(None)
"""The indices of the joints from the asset required by the term. Defaults to slice(None), which means
all the joints in the asset (if present).
If :attr:`joint_names` is specified, this is filled in automatically on initialization of the
manager.
"""
body_names: str | list[str] | None = None
"""The names of the bodies from the asset required by the term. Defaults to None.
The names can be either body names or a regular expression matching the body names.
These are converted to body indices on initialization of the manager and passed to the term
function as a list of body indices under :attr:`body_ids`.
"""
body_ids: list[int] | slice = slice(None)
"""The indices of the bodies from the asset required by the term. Defaults to slice(None), which means
all the bodies in the asset.
If :attr:`body_names` is specified, this is filled in automatically on initialization of the
manager.
"""
preserve_order: bool = False
"""Whether to preserve indices ordering to match with that in the specified joint or body names. Defaults to False.
If False, the ordering of the indices are sorted in ascending order (i.e. the ordering in the entity's joints
or bodies). Otherwise, the indices are preserved in the order of the specified joint and body names.
For more details, see the :meth:`omni.isaac.orbit.utils.string.resolve_matching_names` function.
.. note::
This attribute is only used when :attr:`joint_names` or :attr:`body_names` are specified.
"""
def resolve(self, scene: InteractiveScene):
"""Resolves the scene entity and converts the joint and body names to indices.
This function examines the scene entity from the :class:`InteractiveScene` and resolves the indices
and names of the joints and bodies. It is an expensive operation as it resolves regular expressions
and should be called only once.
Args:
scene: The interactive scene instance.
Raises:
ValueError: If the scene entity is not found.
ValueError: If both ``joint_names`` and ``joint_ids`` are specified and are not consistent.
ValueError: If both ``body_names`` and ``body_ids`` are specified and are not consistent.
"""
# check if the entity is valid
if self.name not in scene.keys():
raise ValueError(f"The scene entity '{self.name}' does not exist. Available entities: {scene.keys()}.")
# convert joint names to indices based on regex
if self.joint_names is not None or self.joint_ids != slice(None):
entity: Articulation = scene[self.name]
# -- if both are not their default values, check if they are valid
if self.joint_names is not None and self.joint_ids != slice(None):
if isinstance(self.joint_names, str):
self.joint_names = [self.joint_names]
if isinstance(self.joint_ids, int):
self.joint_ids = [self.joint_ids]
joint_ids, _ = entity.find_joints(self.joint_names, preserve_order=self.preserve_order)
joint_names = [entity.joint_names[i] for i in self.joint_ids]
if joint_ids != self.joint_ids or joint_names != self.joint_names:
raise ValueError(
"Both 'joint_names' and 'joint_ids' are specified, and are not consistent."
f"\n\tfrom joint names: {self.joint_names} [{joint_ids}]"
f"\n\tfrom joint ids: {joint_names} [{self.joint_ids}]"
"\nHint: Use either 'joint_names' or 'joint_ids' to avoid confusion."
)
# -- from joint names to joint indices
elif self.joint_names is not None:
if isinstance(self.joint_names, str):
self.joint_names = [self.joint_names]
self.joint_ids, _ = entity.find_joints(self.joint_names, preserve_order=self.preserve_order)
# performance optimization (slice offers faster indexing than list of indices)
                # only if all the joints (in the entity's order) are selected
if len(self.joint_ids) == entity.num_joints and self.joint_names == entity.joint_names:
self.joint_ids = slice(None)
# -- from joint indices to joint names
elif self.joint_ids != slice(None):
if isinstance(self.joint_ids, int):
self.joint_ids = [self.joint_ids]
self.joint_names = [entity.joint_names[i] for i in self.joint_ids]
# convert body names to indices based on regex
if self.body_names is not None or self.body_ids != slice(None):
entity: RigidObject = scene[self.name]
# -- if both are not their default values, check if they are valid
if self.body_names is not None and self.body_ids != slice(None):
if isinstance(self.body_names, str):
self.body_names = [self.body_names]
if isinstance(self.body_ids, int):
self.body_ids = [self.body_ids]
body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order)
body_names = [entity.body_names[i] for i in self.body_ids]
if body_ids != self.body_ids or body_names != self.body_names:
raise ValueError(
"Both 'body_names' and 'body_ids' are specified, and are not consistent."
f"\n\tfrom body names: {self.body_names} [{body_ids}]"
f"\n\tfrom body ids: {body_names} [{self.body_ids}]"
"\nHint: Use either 'body_names' or 'body_ids' to avoid confusion."
)
# -- from body names to body indices
elif self.body_names is not None:
if isinstance(self.body_names, str):
self.body_names = [self.body_names]
self.body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order)
# performance optimization (slice offers faster indexing than list of indices)
                # only if all the bodies (in the entity's order) are selected
if len(self.body_ids) == entity.num_bodies and self.body_names == entity.body_names:
self.body_ids = slice(None)
# -- from body indices to body names
elif self.body_ids != slice(None):
if isinstance(self.body_ids, int):
self.body_ids = [self.body_ids]
self.body_names = [entity.body_names[i] for i in self.body_ids]
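# --- Illustrative usage sketch (hedged; not part of the original module) ---
# The entity key "robot" and the regex pattern below are hypothetical; the only
# assumption is a scene containing an articulation under that key.
def _scene_entity_cfg_example(scene: InteractiveScene) -> SceneEntityCfg:
    cfg = SceneEntityCfg(name="robot", joint_names=[".*_hip_joint"])
    # regex names are resolved to integer indices exactly once (expensive operation)
    cfg.resolve(scene)
    # afterwards, cfg.joint_ids holds a list of indices (or slice(None) if all match)
    return cfg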
| 8,066 | Python | 49.10559 | 119 | 0.623729 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/curriculum_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Curriculum manager for updating environment quantities subject to a training curriculum."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import CurriculumTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class CurriculumManager(ManagerBase):
"""Manager to implement and execute specific curricula.
The curriculum manager updates various quantities of the environment subject to a training curriculum by
calling a list of terms. These help stabilize learning by progressively making the learning tasks harder
as the agent improves.
The curriculum terms are parsed from a config class containing the manager's settings and each term's
parameters. Each curriculum term should instantiate the :class:`CurriculumTermCfg` class.
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the manager.
Args:
cfg: The configuration object or dictionary (``dict[str, CurriculumTermCfg]``)
env: An environment object.
Raises:
TypeError: If curriculum term is not of type :class:`CurriculumTermCfg`.
ValueError: If curriculum term configuration does not satisfy its function signature.
"""
super().__init__(cfg, env)
# prepare logging
self._curriculum_state = dict()
for term_name in self._term_names:
self._curriculum_state[term_name] = None
def __str__(self) -> str:
"""Returns: A string representation for curriculum manager."""
msg = f"<CurriculumManager> contains {len(self._term_names)} active terms.\n"
# create table for term information
table = PrettyTable()
table.title = "Active Curriculum Terms"
table.field_names = ["Index", "Name"]
# set alignment of table columns
table.align["Name"] = "l"
# add info on each term
for index, name in enumerate(self._term_names):
table.add_row([index, name])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> list[str]:
"""Name of active curriculum terms."""
return self._term_names
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
"""Returns the current state of individual curriculum terms.
Note:
This function does not use the environment indices :attr:`env_ids`
and logs the state of all the terms. The argument is only present
to maintain consistency with other classes.
Returns:
Dictionary of curriculum terms and their states.
"""
extras = {}
for term_name, term_state in self._curriculum_state.items():
if term_state is not None:
# deal with dict
if isinstance(term_state, dict):
# each key is a separate state to log
for key, value in term_state.items():
if isinstance(value, torch.Tensor):
value = value.item()
extras[f"Curriculum/{term_name}/{key}"] = value
else:
# log directly if not a dict
if isinstance(term_state, torch.Tensor):
term_state = term_state.item()
extras[f"Curriculum/{term_name}"] = term_state
# reset all the curriculum terms
for term_cfg in self._class_term_cfgs:
term_cfg.func.reset(env_ids=env_ids)
# return logged information
return extras
def compute(self, env_ids: Sequence[int] | None = None):
"""Update the curriculum terms.
This function calls each curriculum term managed by the class.
Args:
env_ids: The list of environment IDs to update.
If None, all the environments are updated. Defaults to None.
"""
# resolve environment indices
if env_ids is None:
env_ids = slice(None)
# iterate over all the curriculum terms
for name, term_cfg in zip(self._term_names, self._term_cfgs):
state = term_cfg.func(self._env, env_ids, **term_cfg.params)
self._curriculum_state[name] = state
"""
Helper functions.
"""
def _prepare_terms(self):
        # parse the curriculum terms from the config and collect their information
self._term_names: list[str] = list()
self._term_cfgs: list[CurriculumTermCfg] = list()
self._class_term_cfgs: list[CurriculumTermCfg] = list()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check if the term is a valid term config
if not isinstance(term_cfg, CurriculumTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type CurriculumTermCfg."
f" Received: '{type(term_cfg)}'."
)
# resolve common parameters
self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2)
# add name and config to list
self._term_names.append(term_name)
self._term_cfgs.append(term_cfg)
# check if the term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._class_term_cfgs.append(term_cfg)
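# --- Illustrative sketch (hedged; not part of the original module) ---
# A curriculum term is a plain function that receives the environment and the
# environment indices first (min_argc=2 above); its return value is stored in
# `_curriculum_state` for logging. `common_step_counter` is assumed to exist on
# the environment (a global step count), as elsewhere in omni.isaac.orbit.
def _example_difficulty_schedule(env: RLTaskEnv, env_ids: Sequence[int], num_steps: int = 10000) -> float:
    """Returns a difficulty in [0, 1] that grows linearly with the elapsed steps."""
    return min(1.0, env.common_step_counter / num_steps)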
| 6,171 | Python | 35.738095 | 108 | 0.602009 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/manager_term_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration terms for different managers."""
from __future__ import annotations
import torch
import warnings
from collections.abc import Callable
from dataclasses import MISSING
from typing import TYPE_CHECKING, Any
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.noise import NoiseCfg
from .scene_entity_cfg import SceneEntityCfg
if TYPE_CHECKING:
from .action_manager import ActionTerm
from .command_manager import CommandTerm
from .manager_base import ManagerTermBase
@configclass
class ManagerTermBaseCfg:
"""Configuration for a manager term."""
func: Callable | ManagerTermBase = MISSING
"""The function or class to be called for the term.
The function must take the environment object as the first argument.
The remaining arguments are specified in the :attr:`params` attribute.
It also supports `callable classes`_, i.e. classes that implement the :meth:`__call__`
method. In this case, the class should inherit from the :class:`ManagerTermBase` class
and implement the required methods.
.. _`callable classes`: https://docs.python.org/3/reference/datamodel.html#object.__call__
"""
params: dict[str, Any | SceneEntityCfg] = dict()
"""The parameters to be passed to the function as keyword arguments. Defaults to an empty dict.
.. note::
If the value is a :class:`SceneEntityCfg` object, the manager will query the scene entity
from the :class:`InteractiveScene` and process the entity's joints and bodies as specified
in the :class:`SceneEntityCfg` object.
"""
##
# Action manager.
##
@configclass
class ActionTermCfg:
"""Configuration for an action term."""
class_type: type[ActionTerm] = MISSING
"""The associated action term class.
The class should inherit from :class:`omni.isaac.orbit.managers.action_manager.ActionTerm`.
"""
asset_name: str = MISSING
"""The name of the scene entity.
This is the name defined in the scene configuration file. See the :class:`InteractiveSceneCfg`
class for more details.
"""
##
# Command manager.
##
@configclass
class CommandTermCfg:
"""Configuration for a command generator term."""
class_type: type[CommandTerm] = MISSING
"""The associated command term class to use.
The class should inherit from :class:`omni.isaac.orbit.managers.command_manager.CommandTerm`.
"""
resampling_time_range: tuple[float, float] = MISSING
"""Time before commands are changed [s]."""
debug_vis: bool = False
"""Whether to visualize debug information. Defaults to False."""
##
# Curriculum manager.
##
@configclass
class CurriculumTermCfg(ManagerTermBaseCfg):
"""Configuration for a curriculum term."""
func: Callable[..., float | dict[str, float] | None] = MISSING
"""The name of the function to be called.
This function should take the environment object, environment indices
and any other parameters as input and return the curriculum state for
logging purposes. If the function returns None, the curriculum state
is not logged.
"""
##
# Observation manager.
##
@configclass
class ObservationTermCfg(ManagerTermBaseCfg):
"""Configuration for an observation term."""
func: Callable[..., torch.Tensor] = MISSING
"""The name of the function to be called.
This function should take the environment object and any other parameters
as input and return the observation signal as torch float tensors of
shape (num_envs, obs_term_dim).
"""
noise: NoiseCfg | None = None
"""The noise to add to the observation. Defaults to None, in which case no noise is added."""
clip: tuple[float, float] | None = None
"""The clipping range for the observation after adding noise. Defaults to None,
in which case no clipping is applied."""
scale: float | None = None
"""The scale to apply to the observation after clipping. Defaults to None,
in which case no scaling is applied (same as setting scale to :obj:`1`)."""
@configclass
class ObservationGroupCfg:
"""Configuration for an observation group."""
concatenate_terms: bool = True
"""Whether to concatenate the observation terms in the group. Defaults to True.
If true, the observation terms in the group are concatenated along the last dimension.
Otherwise, they are kept separate and returned as a dictionary.
"""
enable_corruption: bool = False
"""Whether to enable corruption for the observation group. Defaults to False.
If true, the observation terms in the group are corrupted by adding noise (if specified).
Otherwise, no corruption is applied.
"""
##
# Event manager
##
@configclass
class EventTermCfg(ManagerTermBaseCfg):
"""Configuration for a event term."""
func: Callable[..., None] = MISSING
"""The name of the function to be called.
This function should take the environment object, environment indices
and any other parameters as input.
"""
mode: str = MISSING
"""The mode in which the event term is applied.
Note:
        The mode name ``"interval"`` is a special mode that is handled by the
        manager. Hence, its name is reserved and cannot be used for other modes.
"""
interval_range_s: tuple[float, float] | None = None
"""The range of time in seconds at which the term is applied.
Based on this, the interval is sampled uniformly between the specified
range for each environment instance. The term is applied on the environment
instances where the current time hits the interval time.
Note:
This is only used if the mode is ``"interval"``.
"""
@configclass
class RandomizationTermCfg(EventTermCfg):
"""Configuration for a randomization term.
.. deprecated:: v0.3.0
This class is deprecated and will be removed in v0.4.0. Please use :class:`EventTermCfg` instead.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Deprecation warning.
warnings.warn(
"The RandomizationTermCfg has been renamed to EventTermCfg and will be removed in v0.4.0. Please use"
" EventTermCfg instead.",
DeprecationWarning,
)
##
# Reward manager.
##
@configclass
class RewardTermCfg(ManagerTermBaseCfg):
"""Configuration for a reward term."""
func: Callable[..., torch.Tensor] = MISSING
"""The name of the function to be called.
This function should take the environment object and any other parameters
as input and return the reward signals as torch float tensors of
shape (num_envs,).
"""
weight: float = MISSING
"""The weight of the reward term.
This is multiplied with the reward term's value to compute the final
reward.
Note:
If the weight is zero, the reward term is ignored.
"""
##
# Termination manager.
##
@configclass
class TerminationTermCfg(ManagerTermBaseCfg):
"""Configuration for a termination term."""
func: Callable[..., torch.Tensor] = MISSING
"""The name of the function to be called.
This function should take the environment object and any other parameters
as input and return the termination signals as torch boolean tensors of
shape (num_envs,).
"""
time_out: bool = False
"""Whether the termination term contributes towards episodic timeouts. Defaults to False.
Note:
These usually correspond to tasks that have a fixed time limit.
"""
| 7,667 | Python | 27.295203 | 113 | 0.696361 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/reward_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Reward manager for computing reward signals for a given world."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import RewardTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class RewardManager(ManagerBase):
"""Manager for computing reward signals for a given world.
The reward manager computes the total reward as a sum of the weighted reward terms. The reward
terms are parsed from a nested config class containing the reward manger's settings and reward
terms configuration.
The reward terms are parsed from a config class containing the manager's settings and each term's
parameters. Each reward term should instantiate the :class:`RewardTermCfg` class.
.. note::
The reward manager multiplies the reward term's ``weight`` with the time-step interval ``dt``
of the environment. This is done to ensure that the computed reward terms are balanced with
respect to the chosen time-step interval in the environment.
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the reward manager.
Args:
cfg: The configuration object or dictionary (``dict[str, RewardTermCfg]``).
env: The environment instance.
"""
super().__init__(cfg, env)
# prepare extra info to store individual reward term information
self._episode_sums = dict()
for term_name in self._term_names:
self._episode_sums[term_name] = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
# create buffer for managing reward per environment
self._reward_buf = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
def __str__(self) -> str:
"""Returns: A string representation for reward manager."""
msg = f"<RewardManager> contains {len(self._term_names)} active terms.\n"
# create table for term information
table = PrettyTable()
table.title = "Active Reward Terms"
table.field_names = ["Index", "Name", "Weight"]
# set alignment of table columns
table.align["Name"] = "l"
table.align["Weight"] = "r"
# add info on each term
for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)):
table.add_row([index, name, term_cfg.weight])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> list[str]:
"""Name of active reward terms."""
return self._term_names
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]:
"""Returns the episodic sum of individual reward terms.
Args:
env_ids: The environment ids for which the episodic sum of
individual reward terms is to be returned. Defaults to all the environment ids.
Returns:
Dictionary of episodic sum of individual reward terms.
"""
# resolve environment ids
if env_ids is None:
env_ids = slice(None)
# store information
extras = {}
for key in self._episode_sums.keys():
# store information
# r_1 + r_2 + ... + r_n
episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids])
extras["Episode Reward/" + key] = episodic_sum_avg / self._env.max_episode_length_s
# reset episodic sum
self._episode_sums[key][env_ids] = 0.0
# reset all the reward terms
for term_cfg in self._class_term_cfgs:
term_cfg.func.reset(env_ids=env_ids)
# return logged information
return extras
def compute(self, dt: float) -> torch.Tensor:
"""Computes the reward signal as a weighted sum of individual terms.
This function calls each reward term managed by the class and adds them to compute the net
reward signal. It also updates the episodic sums corresponding to individual reward terms.
Args:
dt: The time-step interval of the environment.
Returns:
The net reward signal of shape (num_envs,).
"""
# reset computation
self._reward_buf[:] = 0.0
# iterate over all the reward terms
for name, term_cfg in zip(self._term_names, self._term_cfgs):
# skip if weight is zero (kind of a micro-optimization)
if term_cfg.weight == 0.0:
continue
# compute term's value
value = term_cfg.func(self._env, **term_cfg.params) * term_cfg.weight * dt
# update total reward
self._reward_buf += value
# update episodic sum
self._episode_sums[name] += value
return self._reward_buf
"""
Operations - Term settings.
"""
def set_term_cfg(self, term_name: str, cfg: RewardTermCfg):
"""Sets the configuration of the specified term into the manager.
Args:
term_name: The name of the reward term.
cfg: The configuration for the reward term.
Raises:
ValueError: If the term name is not found.
"""
if term_name not in self._term_names:
raise ValueError(f"Reward term '{term_name}' not found.")
# set the configuration
self._term_cfgs[self._term_names.index(term_name)] = cfg
def get_term_cfg(self, term_name: str) -> RewardTermCfg:
"""Gets the configuration for the specified term.
Args:
term_name: The name of the reward term.
Returns:
The configuration of the reward term.
Raises:
ValueError: If the term name is not found.
"""
if term_name not in self._term_names:
raise ValueError(f"Reward term '{term_name}' not found.")
# return the configuration
return self._term_cfgs[self._term_names.index(term_name)]
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of reward functions."""
        # parse the reward terms from the config and collect their information
self._term_names: list[str] = list()
self._term_cfgs: list[RewardTermCfg] = list()
self._class_term_cfgs: list[RewardTermCfg] = list()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check for valid config type
if not isinstance(term_cfg, RewardTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type RewardTermCfg."
f" Received: '{type(term_cfg)}'."
)
# check for valid weight type
if not isinstance(term_cfg.weight, (float, int)):
raise TypeError(
f"Weight for the term '{term_name}' is not of type float or int."
f" Received: '{type(term_cfg.weight)}'."
)
# resolve common parameters
self._resolve_common_term_cfg(term_name, term_cfg, min_argc=1)
# add function to list
self._term_names.append(term_name)
self._term_cfgs.append(term_cfg)
# check if the term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._class_term_cfgs.append(term_cfg)
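# --- Illustrative sketch (hedged; not part of the original module) ---
# A reward term is a plain function of the environment (plus optional params)
# returning a (num_envs,) float tensor; the manager scales it by `weight * dt`
# as documented above. The `termination_manager` attribute is assumed to exist
# on RLTaskEnv, as elsewhere in omni.isaac.orbit.
def _example_alive_reward(env: RLTaskEnv) -> torch.Tensor:
    """Rewards every environment that has not reached a terminal state."""
    return (~env.termination_manager.terminated).float()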
| 8,148 | Python | 35.379464 | 109 | 0.602111 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/action_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Action manager for processing actions sent to the environment."""
from __future__ import annotations
import torch
from abc import abstractmethod
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
from omni.isaac.orbit.assets import AssetBase
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import ActionTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import BaseEnv
class ActionTerm(ManagerTermBase):
"""Base class for action terms.
The action term is responsible for processing the raw actions sent to the environment
and applying them to the asset managed by the term. The action term is comprised of two
operations:
* Processing of actions: This operation is performed once per **environment step** and
is responsible for pre-processing the raw actions sent to the environment.
* Applying actions: This operation is performed once per **simulation step** and is
responsible for applying the processed actions to the asset managed by the term.
"""
def __init__(self, cfg: ActionTermCfg, env: BaseEnv):
"""Initialize the action term.
Args:
cfg: The configuration object.
env: The environment instance.
"""
# call the base class constructor
super().__init__(cfg, env)
# parse config to obtain asset to which the term is applied
self._asset: AssetBase = self._env.scene[self.cfg.asset_name]
"""
Properties.
"""
@property
@abstractmethod
def action_dim(self) -> int:
"""Dimension of the action term."""
raise NotImplementedError
@property
@abstractmethod
def raw_actions(self) -> torch.Tensor:
"""The input/raw actions sent to the term."""
raise NotImplementedError
@property
@abstractmethod
def processed_actions(self) -> torch.Tensor:
"""The actions computed by the term after applying any processing."""
raise NotImplementedError
"""
Operations.
"""
@abstractmethod
def process_actions(self, actions: torch.Tensor):
"""Processes the actions sent to the environment.
Note:
This function is called once per environment step by the manager.
Args:
actions: The actions to process.
"""
raise NotImplementedError
@abstractmethod
def apply_actions(self):
"""Applies the actions to the asset managed by the term.
Note:
This is called at every simulation step by the manager.
"""
raise NotImplementedError
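# --- Illustrative sketch (hedged; not part of the original module) ---
# `_ExampleJointPositionAction` is a hypothetical minimal term. It assumes the
# managed asset is an Articulation exposing `num_joints` and
# `set_joint_position_target`, as in omni.isaac.orbit assets.
class _ExampleJointPositionAction(ActionTerm):
    """Forwards the raw actions directly as joint position targets."""
    def __init__(self, cfg: ActionTermCfg, env: BaseEnv):
        super().__init__(cfg, env)
        self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
    @property
    def action_dim(self) -> int:
        return self._asset.num_joints
    @property
    def raw_actions(self) -> torch.Tensor:
        return self._raw_actions
    @property
    def processed_actions(self) -> torch.Tensor:
        return self._raw_actions  # no scaling or offset in this toy example
    def process_actions(self, actions: torch.Tensor):
        self._raw_actions[:] = actions  # called once per environment step
    def apply_actions(self):
        self._asset.set_joint_position_target(self._raw_actions)  # once per sim step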
class ActionManager(ManagerBase):
"""Manager for processing and applying actions for a given world.
The action manager handles the interpretation and application of user-defined
actions on a given world. It is comprised of different action terms that decide
the dimension of the expected actions.
The action manager performs operations at two stages:
* processing of actions: It splits the input actions to each term and performs any
pre-processing needed. This should be called once at every environment step.
* apply actions: This operation typically sets the processed actions into the assets in the
scene (such as robots). It should be called before every simulation step.
"""
def __init__(self, cfg: object, env: BaseEnv):
"""Initialize the action manager.
Args:
cfg: The configuration object or dictionary (``dict[str, ActionTermCfg]``).
env: The environment instance.
"""
super().__init__(cfg, env)
# create buffers to store actions
self._action = torch.zeros((self.num_envs, self.total_action_dim), device=self.device)
self._prev_action = torch.zeros_like(self._action)
def __str__(self) -> str:
"""Returns: A string representation for action manager."""
msg = f"<ActionManager> contains {len(self._term_names)} active terms.\n"
# create table for term information
table = PrettyTable()
table.title = f"Active Action Terms (shape: {self.total_action_dim})"
table.field_names = ["Index", "Name", "Dimension"]
# set alignment of table columns
table.align["Name"] = "l"
table.align["Dimension"] = "r"
# add info on each term
for index, (name, term) in enumerate(self._terms.items()):
table.add_row([index, name, term.action_dim])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def total_action_dim(self) -> int:
"""Total dimension of actions."""
return sum(self.action_term_dim)
@property
def active_terms(self) -> list[str]:
"""Name of active action terms."""
return self._term_names
@property
def action_term_dim(self) -> list[int]:
"""Shape of each action term."""
return [term.action_dim for term in self._terms.values()]
@property
def action(self) -> torch.Tensor:
"""The actions sent to the environment. Shape is (num_envs, total_action_dim)."""
return self._action
@property
def prev_action(self) -> torch.Tensor:
"""The previous actions sent to the environment. Shape is (num_envs, total_action_dim)."""
return self._prev_action
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]:
"""Resets the action history.
Args:
env_ids: The environment ids. Defaults to None, in which case
all environments are considered.
Returns:
An empty dictionary.
"""
# resolve environment ids
if env_ids is None:
env_ids = slice(None)
# reset the action history
self._prev_action[env_ids] = 0.0
self._action[env_ids] = 0.0
# reset all action terms
for term in self._terms.values():
term.reset(env_ids=env_ids)
# nothing to log here
return {}
def process_action(self, action: torch.Tensor):
"""Processes the actions sent to the environment.
Note:
This function should be called once per environment step.
Args:
action: The actions to process.
"""
# check if action dimension is valid
if self.total_action_dim != action.shape[1]:
raise ValueError(f"Invalid action shape, expected: {self.total_action_dim}, received: {action.shape[1]}.")
# store the input actions
self._prev_action[:] = self._action
self._action[:] = action.to(self.device)
# split the actions and apply to each tensor
idx = 0
for term in self._terms.values():
term_actions = action[:, idx : idx + term.action_dim]
term.process_actions(term_actions)
idx += term.action_dim
def apply_action(self) -> None:
"""Applies the actions to the environment/simulation.
Note:
This should be called at every simulation step.
"""
for term in self._terms.values():
term.apply_actions()
def get_term(self, name: str) -> ActionTerm:
"""Returns the action term with the specified name.
Args:
name: The name of the action term.
Returns:
The action term with the specified name.
"""
return self._terms[name]
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of action terms."""
# parse action terms from the config
self._term_names: list[str] = list()
self._terms: dict[str, ActionTerm] = dict()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
for term_name, term_cfg in cfg_items:
# check if term config is None
if term_cfg is None:
continue
# check valid type
if not isinstance(term_cfg, ActionTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type ActionTermCfg."
f" Received: '{type(term_cfg)}'."
)
# create the action term
term = term_cfg.class_type(term_cfg, self._env)
# sanity check if term is valid type
if not isinstance(term, ActionTerm):
raise TypeError(f"Returned object for the term '{term_name}' is not of type ActionType.")
# add term name and parameters
self._term_names.append(term_name)
self._terms[term_name] = term
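# --- Illustrative usage sketch (hedged; not part of the original module) ---
# Mirrors the two-stage contract documented above: process once per environment
# step, apply once per simulation step. `cfg`, `env` and the decimation factor
# are hypothetical placeholders.
def _action_manager_usage_example(cfg: object, env: BaseEnv, decimation: int = 4) -> None:
    manager = ActionManager(cfg, env)
    actions = torch.zeros(manager.num_envs, manager.total_action_dim, device=manager.device)
    manager.process_action(actions)  # split across terms and pre-process
    for _ in range(decimation):
        manager.apply_action()  # write the processed targets into the assets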
| 9,069 | Python | 31.862319 | 118 | 0.614732 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/termination_manager.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Termination manager for computing done signals for a given world."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import TerminationTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class TerminationManager(ManagerBase):
"""Manager for computing done signals for a given world.
The termination manager computes the termination signal (also called dones) as a combination
of termination terms. Each termination term is a function which takes the environment as an
argument and returns a boolean tensor of shape (num_envs,). The termination manager
computes the termination signal as the union (logical or) of all the termination terms.
Following the `Gymnasium API <https://gymnasium.farama.org/tutorials/gymnasium_basics/handling_time_limits/>`_,
the termination signal is computed as the logical OR of the following signals:
* **Time-out**: This signal is set to true if the environment has ended after an externally defined condition
(that is outside the scope of a MDP). For example, the environment may be terminated if the episode has
timed out (i.e. reached max episode length).
* **Terminated**: This signal is set to true if the environment has reached a terminal state defined by the
environment. This state may correspond to task success, task failure, robot falling, etc.
These signals can be individually accessed using the :attr:`time_outs` and :attr:`terminated` properties.
The termination terms are parsed from a config class containing the manager's settings and each term's
parameters. Each termination term should instantiate the :class:`TerminationTermCfg` class. The term's
configuration :attr:`TerminationTermCfg.time_out` decides whether the term is a timeout or a termination term.
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initializes the termination manager.
Args:
cfg: The configuration object or dictionary (``dict[str, TerminationTermCfg]``).
env: An environment object.
"""
super().__init__(cfg, env)
# prepare extra info to store individual termination term information
self._term_dones = dict()
for term_name in self._term_names:
self._term_dones[term_name] = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)
# create buffer for managing termination per environment
self._truncated_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)
self._terminated_buf = torch.zeros_like(self._truncated_buf)
def __str__(self) -> str:
"""Returns: A string representation for termination manager."""
msg = f"<TerminationManager> contains {len(self._term_names)} active terms.\n"
# create table for term information
table = PrettyTable()
table.title = "Active Termination Terms"
table.field_names = ["Index", "Name", "Time Out"]
# set alignment of table columns
table.align["Name"] = "l"
# add info on each term
for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)):
table.add_row([index, name, term_cfg.time_out])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> list[str]:
"""Name of active termination terms."""
return self._term_names
@property
def dones(self) -> torch.Tensor:
"""The net termination signal. Shape is (num_envs,)."""
return self._truncated_buf | self._terminated_buf
@property
def time_outs(self) -> torch.Tensor:
"""The timeout signal (reaching max episode length). Shape is (num_envs,).
This signal is set to true if the environment has ended after an externally defined condition
(that is outside the scope of a MDP). For example, the environment may be terminated if the episode has
timed out (i.e. reached max episode length).
"""
return self._truncated_buf
@property
def terminated(self) -> torch.Tensor:
"""The terminated signal (reaching a terminal state). Shape is (num_envs,).
This signal is set to true if the environment has reached a terminal state defined by the environment.
This state may correspond to task success, task failure, robot falling, etc.
"""
return self._terminated_buf
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]:
"""Returns the episodic counts of individual termination terms.
Args:
env_ids: The environment ids. Defaults to None, in which case
all environments are considered.
Returns:
            Dictionary of episodic counts of individual termination terms.
"""
# resolve environment ids
if env_ids is None:
env_ids = slice(None)
# add to episode dict
extras = {}
for key in self._term_dones.keys():
# store information
extras["Episode Termination/" + key] = torch.count_nonzero(self._term_dones[key][env_ids]).item()
# reset all the reward terms
for term_cfg in self._class_term_cfgs:
term_cfg.func.reset(env_ids=env_ids)
# return logged information
return extras
def compute(self) -> torch.Tensor:
"""Computes the termination signal as union of individual terms.
This function calls each termination term managed by the class and performs a logical OR operation
to compute the net termination signal.
Returns:
The combined termination signal of shape (num_envs,).
"""
# reset computation
self._truncated_buf[:] = False
self._terminated_buf[:] = False
# iterate over all the termination terms
for name, term_cfg in zip(self._term_names, self._term_cfgs):
value = term_cfg.func(self._env, **term_cfg.params)
# store timeout signal separately
if term_cfg.time_out:
self._truncated_buf |= value
else:
self._terminated_buf |= value
# add to episode dones
self._term_dones[name][:] = value
# return combined termination signal
return self._truncated_buf | self._terminated_buf
def get_term(self, name: str) -> torch.Tensor:
"""Returns the termination term with the specified name.
Args:
name: The name of the termination term.
Returns:
The corresponding termination term value. Shape is (num_envs,).
"""
return self._term_dones[name]
"""
Operations - Term settings.
"""
def set_term_cfg(self, term_name: str, cfg: TerminationTermCfg):
"""Sets the configuration of the specified term into the manager.
Args:
term_name: The name of the termination term.
cfg: The configuration for the termination term.
Raises:
ValueError: If the term name is not found.
"""
if term_name not in self._term_names:
raise ValueError(f"Termination term '{term_name}' not found.")
# set the configuration
self._term_cfgs[self._term_names.index(term_name)] = cfg
def get_term_cfg(self, term_name: str) -> TerminationTermCfg:
"""Gets the configuration for the specified term.
Args:
term_name: The name of the termination term.
Returns:
The configuration of the termination term.
Raises:
ValueError: If the term name is not found.
"""
if term_name not in self._term_names:
raise ValueError(f"Termination term '{term_name}' not found.")
# return the configuration
return self._term_cfgs[self._term_names.index(term_name)]
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of termination functions."""
        # parse the termination terms from the config and collect their information
self._term_names: list[str] = list()
self._term_cfgs: list[TerminationTermCfg] = list()
self._class_term_cfgs: list[TerminationTermCfg] = list()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check for valid config type
if not isinstance(term_cfg, TerminationTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type TerminationTermCfg."
f" Received: '{type(term_cfg)}'."
)
# resolve common parameters
self._resolve_common_term_cfg(term_name, term_cfg, min_argc=1)
# add function to list
self._term_names.append(term_name)
self._term_cfgs.append(term_cfg)
# check if the term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._class_term_cfgs.append(term_cfg)
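# --- Illustrative sketch (hedged; not part of the original module) ---
# A termination term returns a boolean tensor of shape (num_envs,). The episode
# length attributes below are assumed to exist on RLTaskEnv, as elsewhere in
# omni.isaac.orbit; a term like this would be registered with `time_out=True`.
def _example_time_out(env: RLTaskEnv) -> torch.Tensor:
    """Marks environments whose episode has reached the maximum length."""
    return env.episode_length_buf >= env.max_episode_length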
| 9,845 | Python | 38.071428 | 115 | 0.634027 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/rmp_flow.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from dataclasses import MISSING
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.motion_generation import ArticulationMotionPolicy
from omni.isaac.motion_generation.lula.motion_policies import RmpFlow, RmpFlowSmoothed
from omni.isaac.orbit.utils import configclass
@configclass
class RmpFlowControllerCfg:
"""Configuration for RMP-Flow controller (provided through LULA library)."""
name: str = "rmp_flow"
"""Name of the controller. Supported: "rmp_flow", "rmp_flow_smoothed". Defaults to "rmp_flow"."""
config_file: str = MISSING
"""Path to the configuration file for the controller."""
urdf_file: str = MISSING
"""Path to the URDF model of the robot."""
collision_file: str = MISSING
"""Path to collision model description of the robot."""
frame_name: str = MISSING
"""Name of the robot frame for task space (must be present in the URDF)."""
evaluations_per_frame: float = MISSING
"""Number of substeps during Euler integration inside LULA world model."""
ignore_robot_state_updates: bool = False
"""If true, then state of the world model inside controller is rolled out. Defaults to False."""
class RmpFlowController:
"""Wraps around RMPFlow from IsaacSim for batched environments."""
def __init__(self, cfg: RmpFlowControllerCfg, device: str):
"""Initialize the controller.
Args:
cfg: The configuration for the controller.
device: The device to use for computation.
"""
# store input
self.cfg = cfg
self._device = device
# display info
print(f"[INFO]: Loading RMPFlow controller URDF from: {self.cfg.urdf_file}")
"""
Properties.
"""
@property
def num_actions(self) -> int:
"""Dimension of the action space of controller."""
return 7
"""
Operations.
"""
def initialize(self, prim_paths_expr: str):
"""Initialize the controller.
Args:
prim_paths_expr: The expression to find the articulation prim paths.
"""
# obtain the simulation time
physics_dt = SimulationContext.instance().get_physics_dt()
# find all prims
self._prim_paths = prim_utils.find_matching_prim_paths(prim_paths_expr)
self.num_robots = len(self._prim_paths)
# resolve controller
if self.cfg.name == "rmp_flow":
controller_cls = RmpFlow
elif self.cfg.name == "rmp_flow_smoothed":
controller_cls = RmpFlowSmoothed
else:
raise ValueError(f"Unsupported controller in Lula library: {self.cfg.name}")
        # create references to all the robots and their controllers
self.articulation_policies = list()
for prim_path in self._prim_paths:
# add robot reference
robot = Articulation(prim_path)
robot.initialize()
# add controller
rmpflow = controller_cls(
robot_description_path=self.cfg.collision_file,
urdf_path=self.cfg.urdf_file,
rmpflow_config_path=self.cfg.config_file,
end_effector_frame_name=self.cfg.frame_name,
maximum_substep_size=physics_dt / self.cfg.evaluations_per_frame,
ignore_robot_state_updates=self.cfg.ignore_robot_state_updates,
)
# wrap rmpflow to connect to the Franka robot articulation
articulation_policy = ArticulationMotionPolicy(robot, rmpflow, physics_dt)
self.articulation_policies.append(articulation_policy)
# get number of active joints
self.active_dof_names = self.articulation_policies[0].get_motion_policy().get_active_joints()
self.num_dof = len(self.active_dof_names)
# create buffers
# -- for storing command
self._command = torch.zeros(self.num_robots, self.num_actions, device=self._device)
# -- for policy output
self.dof_pos_target = torch.zeros((self.num_robots, self.num_dof), device=self._device)
self.dof_vel_target = torch.zeros((self.num_robots, self.num_dof), device=self._device)
    def reset_idx(self, robot_ids: torch.Tensor | None = None):
"""Reset the internals."""
# if no robot ids are provided, then reset all robots
if robot_ids is None:
robot_ids = torch.arange(self.num_robots, device=self._device)
# reset policies for specified robots
for index in robot_ids:
self.articulation_policies[index].motion_policy.reset()
def set_command(self, command: torch.Tensor):
"""Set target end-effector pose command."""
# store command
self._command[:] = command
def compute(self) -> tuple[torch.Tensor, torch.Tensor]:
"""Performs inference with the controller.
Returns:
The target joint positions and velocity commands.
"""
# convert command to numpy
command = self._command.cpu().numpy()
# compute control actions
for i, policy in enumerate(self.articulation_policies):
# enable type-hinting
policy: ArticulationMotionPolicy
            # set the rmpflow end-effector target from the current command buffer
policy.get_motion_policy().set_end_effector_target(
target_position=command[i, 0:3], target_orientation=command[i, 3:7]
)
# apply action on the robot
action = policy.get_next_articulation_action()
# copy actions into buffer
self.dof_pos_target[i, :] = torch.from_numpy(action.joint_positions[:]).to(self.dof_pos_target)
self.dof_vel_target[i, :] = torch.from_numpy(action.joint_velocities[:]).to(self.dof_vel_target)
return self.dof_pos_target, self.dof_vel_target
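# --- Illustrative usage sketch (hedged; not part of the original module) ---
# The prim path expression is a hypothetical placeholder; the 7D command layout
# (position xyz followed by a quaternion, assumed (w, x, y, z) ordering) matches
# `set_end_effector_target` above.
def _rmp_flow_usage_example(cfg: RmpFlowControllerCfg) -> None:
    controller = RmpFlowController(cfg, device="cuda:0")
    controller.initialize("/World/envs/env_.*/Robot")
    command = torch.zeros(controller.num_robots, controller.num_actions, device="cuda:0")
    command[:, 3] = 1.0  # identity orientation
    controller.set_command(command)
    # query desired joint positions and velocities for the active joints
    joint_pos_des, joint_vel_des = controller.compute()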
| 6,110 | Python | 39.74 | 108 | 0.642717 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package for different controllers and motion-generators.
Controllers or motion generators are responsible for closed-loop tracking of a given command. The
controller can be a simple PID controller or a more complex controller such as impedance control
or inverse kinematics control. The controller is responsible for generating the desired joint-level
commands to be sent to the robot.
"""
from .differential_ik import DifferentialIKController
from .differential_ik_cfg import DifferentialIKControllerCfg
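# Example import (sketch): downstream code typically constructs the controller from its config, e.g.
#   from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg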
| 637 | Python | 38.874998 | 99 | 0.814757 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/operational_space.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from collections.abc import Sequence
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.math import apply_delta_pose, compute_pose_error
@configclass
class OperationSpaceControllerCfg:
"""Configuration for operation-space controller."""
command_types: Sequence[str] = MISSING
"""Type of command.
It has two sub-strings joined by underscore:
- type of command mode: "position", "pose", "force"
- type of command resolving: "abs" (absolute), "rel" (relative)
"""
impedance_mode: str = MISSING
"""Type of gains for motion control: "fixed", "variable", "variable_kp"."""
uncouple_motion_wrench: bool = False
"""Whether to decouple the wrench computation from task-space pose (motion) error."""
motion_control_axes: Sequence[int] = (1, 1, 1, 1, 1, 1)
"""Motion direction to control. Mark as 0/1 for each axis."""
force_control_axes: Sequence[int] = (0, 0, 0, 0, 0, 0)
"""Force direction to control. Mark as 0/1 for each axis."""
inertial_compensation: bool = False
"""Whether to perform inertial compensation for motion control (inverse dynamics)."""
gravity_compensation: bool = False
"""Whether to perform gravity compensation."""
stiffness: float | Sequence[float] = MISSING
"""The positional gain for determining wrenches based on task-space pose error."""
damping_ratio: float | Sequence[float] | None = None
"""The damping ratio is used in-conjunction with positional gain to compute wrenches
based on task-space velocity error.
The following math operation is performed for computing velocity gains:
    :math:`\text{d\_gains} = 2 \times \sqrt{\text{p\_gains}} \times \text{damping\_ratio}`.
"""
stiffness_limits: tuple[float, float] = (0, 300)
"""Minimum and maximum values for positional gains.
Note: Used only when :obj:`impedance_mode` is "variable" or "variable_kp".
"""
damping_ratio_limits: tuple[float, float] = (0, 100)
"""Minimum and maximum values for damping ratios used to compute velocity gains.
Note: Used only when :obj:`impedance_mode` is "variable".
"""
force_stiffness: float | Sequence[float] = None
"""The positional gain for determining wrenches for closed-loop force control.
    If :obj:`None`, then open-loop control of desired forces is performed.
"""
position_command_scale: tuple[float, float, float] = (1.0, 1.0, 1.0)
"""Scaling of the position command received. Used only in relative mode."""
rotation_command_scale: tuple[float, float, float] = (1.0, 1.0, 1.0)
"""Scaling of the rotation command received. Used only in relative mode."""
class OperationSpaceController:
"""Operation-space controller.
Reference:
[1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf
"""
def __init__(self, cfg: OperationSpaceControllerCfg, num_robots: int, num_dof: int, device: str):
"""Initialize operation-space controller.
Args:
cfg: The configuration for operation-space controller.
num_robots: The number of robots to control.
num_dof: The number of degrees of freedom of the robot.
device: The device to use for computations.
Raises:
ValueError: When invalid control command is provided.
"""
# store inputs
self.cfg = cfg
self.num_robots = num_robots
self.num_dof = num_dof
self._device = device
        # resolve task-space target dimensions
self.target_list = list()
for command_type in self.cfg.command_types:
if "position" in command_type:
self.target_list.append(3)
elif command_type == "pose_rel":
self.target_list.append(6)
elif command_type == "pose_abs":
self.target_list.append(7)
elif command_type == "force_abs":
self.target_list.append(6)
else:
raise ValueError(f"Invalid control command: {command_type}.")
self.target_dim = sum(self.target_list)
# create buffers
# -- selection matrices
self._selection_matrix_motion = torch.diag(
torch.tensor(self.cfg.motion_control_axes, dtype=torch.float, device=self._device)
)
self._selection_matrix_force = torch.diag(
torch.tensor(self.cfg.force_control_axes, dtype=torch.float, device=self._device)
)
# -- commands
self._task_space_target = torch.zeros(self.num_robots, self.target_dim, device=self._device)
# -- scaling of command
self._position_command_scale = torch.diag(torch.tensor(self.cfg.position_command_scale, device=self._device))
self._rotation_command_scale = torch.diag(torch.tensor(self.cfg.rotation_command_scale, device=self._device))
# -- motion control gains
self._p_gains = torch.zeros(self.num_robots, 6, device=self._device)
self._p_gains[:] = torch.tensor(self.cfg.stiffness, device=self._device)
self._d_gains = 2 * torch.sqrt(self._p_gains) * torch.tensor(self.cfg.damping_ratio, device=self._device)
# -- force control gains
if self.cfg.force_stiffness is not None:
self._p_wrench_gains = torch.zeros(self.num_robots, 6, device=self._device)
self._p_wrench_gains[:] = torch.tensor(self.cfg.force_stiffness, device=self._device)
else:
self._p_wrench_gains = None
        # -- position gain limits (min, max)
        self._p_gains_limits = torch.tensor(self.cfg.stiffness_limits, dtype=torch.float, device=self._device)
        # -- damping ratio limits (min, max)
        self._damping_ratio_limits = torch.tensor(self.cfg.damping_ratio_limits, dtype=torch.float, device=self._device)
# -- storing outputs
        self._desired_torques = torch.zeros(self.num_robots, self.num_dof, device=self._device)
"""
Properties.
"""
@property
def num_actions(self) -> int:
"""Dimension of the action space of controller."""
# impedance mode
if self.cfg.impedance_mode == "fixed":
# task-space pose
return self.target_dim
elif self.cfg.impedance_mode == "variable_kp":
# task-space pose + stiffness
return self.target_dim + 6
elif self.cfg.impedance_mode == "variable":
# task-space pose + stiffness + damping
return self.target_dim + 6 + 6
else:
raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.")
"""
Operations.
"""
def initialize(self):
"""Initialize the internals."""
pass
def reset_idx(self, robot_ids: torch.Tensor = None):
"""Reset the internals."""
pass
def set_command(self, command: torch.Tensor):
"""Set target end-effector pose or force command.
Args:
command: The target end-effector pose or force command.
"""
# check input size
if command.shape != (self.num_robots, self.num_actions):
raise ValueError(
f"Invalid command shape '{command.shape}'. Expected: '{(self.num_robots, self.num_actions)}'."
)
# impedance mode
if self.cfg.impedance_mode == "fixed":
            # task-space target
self._task_space_target[:] = command
elif self.cfg.impedance_mode == "variable_kp":
# split input command
            task_space_command, stiffness = torch.tensor_split(command, [self.target_dim], dim=-1)
            # format command
            stiffness = stiffness.clip(min=self._p_gains_limits[0], max=self._p_gains_limits[1])
            # task-space target + stiffness
            self._task_space_target[:] = task_space_command
self._p_gains[:] = stiffness
self._d_gains[:] = 2 * torch.sqrt(self._p_gains) # critically damped
elif self.cfg.impedance_mode == "variable":
# split input command
            task_space_command, stiffness, damping_ratio = torch.tensor_split(
                command, [self.target_dim, self.target_dim + 6], dim=-1
            )
            # format command
            stiffness = stiffness.clip(min=self._p_gains_limits[0], max=self._p_gains_limits[1])
            damping_ratio = damping_ratio.clip(min=self._damping_ratio_limits[0], max=self._damping_ratio_limits[1])
            # task-space target + stiffness + damping
            self._task_space_target[:] = task_space_command
self._p_gains[:] = stiffness
self._d_gains[:] = 2 * torch.sqrt(self._p_gains) * damping_ratio
else:
raise ValueError(f"Invalid impedance mode: {self.cfg.impedance_mode}.")
def compute(
self,
jacobian: torch.Tensor,
ee_pose: torch.Tensor | None = None,
ee_vel: torch.Tensor | None = None,
ee_force: torch.Tensor | None = None,
mass_matrix: torch.Tensor | None = None,
gravity: torch.Tensor | None = None,
) -> torch.Tensor:
"""Performs inference with the controller.
Args:
jacobian: The Jacobian matrix of the end-effector.
ee_pose: The current end-effector pose. It is a tensor of shape
(num_robots, 7), which contains the position and quaternion (w, x, y, z). Defaults to None.
ee_vel: The current end-effector velocity. It is a tensor of shape
(num_robots, 6), which contains the linear and angular velocities. Defaults to None.
            ee_force: The current external wrench on the end-effector. It is a tensor of
                shape (num_robots, 6), which contains the linear forces and torques. Defaults to None.
mass_matrix: The joint-space inertial matrix. Defaults to None.
gravity: The joint-space gravity vector. Defaults to None.
Raises:
ValueError: When the end-effector pose is not provided for the 'position_rel' command.
ValueError: When the end-effector pose is not provided for the 'position_abs' command.
ValueError: When the end-effector pose is not provided for the 'pose_rel' command.
ValueError: When an invalid command type is provided.
ValueError: When motion-control is enabled but the end-effector pose or velocity is not provided.
ValueError: When force-control is enabled but the end-effector force is not provided.
ValueError: When inertial compensation is enabled but the mass matrix is not provided.
ValueError: When gravity compensation is enabled but the gravity vector is not provided.
Returns:
The target joint torques commands.
"""
# buffers for motion/force control
desired_ee_pos = None
desired_ee_rot = None
desired_ee_force = None
# resolve the commands
        target_groups = torch.split(self._task_space_target, self.target_list, dim=-1)
for command_type, target in zip(self.cfg.command_types, target_groups):
if command_type == "position_rel":
# check input is provided
if ee_pose is None:
raise ValueError("End-effector pose is required for 'position_rel' command.")
                # scale command (re-binding, so the stored command buffer is not modified)
                target = target @ self._position_command_scale
# compute targets
desired_ee_pos = ee_pose[:, :3] + target
desired_ee_rot = ee_pose[:, 3:]
elif command_type == "position_abs":
# check input is provided
if ee_pose is None:
raise ValueError("End-effector pose is required for 'position_abs' command.")
# compute targets
desired_ee_pos = target
desired_ee_rot = ee_pose[:, 3:]
elif command_type == "pose_rel":
# check input is provided
if ee_pose is None:
raise ValueError("End-effector pose is required for 'pose_rel' command.")
                # scale command without writing back into the stored command buffer
                target = torch.cat(
                    (target[:, 0:3] @ self._position_command_scale, target[:, 3:6] @ self._rotation_command_scale),
                    dim=-1,
                )
# compute targets
desired_ee_pos, desired_ee_rot = apply_delta_pose(ee_pose[:, :3], ee_pose[:, 3:], target)
elif command_type == "pose_abs":
# compute targets
desired_ee_pos = target[:, 0:3]
desired_ee_rot = target[:, 3:7]
elif command_type == "force_abs":
# compute targets
desired_ee_force = target
else:
raise ValueError(f"Invalid control command: {self.cfg.command_type}.")
# reset desired joint torques
self._desired_torques[:] = 0
# compute for motion-control
if desired_ee_pos is not None:
# check input is provided
if ee_pose is None or ee_vel is None:
raise ValueError("End-effector pose and velocity are required for motion control.")
# -- end-effector tracking error
            position_error, axis_angle_error = compute_pose_error(
                ee_pose[:, :3], ee_pose[:, 3:], desired_ee_pos, desired_ee_rot, rot_error_type="axis_angle"
            )
            pose_error = torch.cat((position_error, axis_angle_error), dim=-1)
velocity_error = -ee_vel # zero target velocity
# -- desired end-effector acceleration (spring damped system)
des_ee_acc = self._p_gains * pose_error + self._d_gains * velocity_error
# -- inertial compensation
if self.cfg.inertial_compensation:
# check input is provided
if mass_matrix is None:
raise ValueError("Mass matrix is required for inertial compensation.")
# compute task-space dynamics quantities
# wrench = (J M^(-1) J^T)^(-1) * \ddot(x_des)
mass_matrix_inv = torch.inverse(mass_matrix)
if self.cfg.uncouple_motion_wrench:
                    # decoupled mass matrices for the translational and rotational sub-tasks
                    jacobian_pos = jacobian[:, 0:3]
                    jacobian_ori = jacobian[:, 3:6]
                    lambda_pos = torch.inverse(jacobian_pos @ mass_matrix_inv @ jacobian_pos.transpose(-2, -1))
                    lambda_ori = torch.inverse(jacobian_ori @ mass_matrix_inv @ jacobian_ori.transpose(-2, -1))
                    # desired end-effector wrench (from pseudo-dynamics)
                    decoupled_force = (lambda_pos @ des_ee_acc[:, 0:3].unsqueeze(-1)).squeeze(-1)
                    decoupled_torque = (lambda_ori @ des_ee_acc[:, 3:6].unsqueeze(-1)).squeeze(-1)
                    des_motion_wrench = torch.cat((decoupled_force, decoupled_torque), dim=-1)
else:
# coupled dynamics
                    lambda_full = torch.inverse(jacobian @ mass_matrix_inv @ jacobian.transpose(-2, -1))
                    # desired end-effector wrench (from pseudo-dynamics)
                    des_motion_wrench = (lambda_full @ des_ee_acc.unsqueeze(-1)).squeeze(-1)
else:
# task-space impedance control
# wrench = \ddot(x_des)
des_motion_wrench = des_ee_acc
# -- joint-space wrench
            self._desired_torques += (
                jacobian.transpose(-2, -1) @ self._selection_matrix_motion @ des_motion_wrench.unsqueeze(-1)
            ).squeeze(-1)
# compute for force control
if desired_ee_force is not None:
# -- task-space wrench
            if self.cfg.force_stiffness is not None:
# check input is provided
if ee_force is None:
raise ValueError("End-effector force is required for closed-loop force control.")
# closed-loop control
des_force_wrench = desired_ee_force + self._p_wrench_gains * (desired_ee_force - ee_force)
else:
# open-loop control
des_force_wrench = desired_ee_force
# -- joint-space wrench
            self._desired_torques += (
                jacobian.transpose(-2, -1) @ self._selection_matrix_force @ des_force_wrench.unsqueeze(-1)
            ).squeeze(-1)
# add gravity compensation (bias correction)
if self.cfg.gravity_compensation:
# check input is provided
if gravity is None:
raise ValueError("Gravity vector is required for gravity compensation.")
# add gravity compensation
self._desired_torques += gravity
return self._desired_torques
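# Hedged usage sketch (added for illustration): one inference step with dummy robot state; the
# `jacobian`, `ee_pose`, and `ee_vel` tensors are random/zero placeholders standing in for values
# that would normally be read from the simulated articulation.
def _example_osc_step() -> torch.Tensor:
    num_robots, num_dof = 4, 7
    cfg = OperationSpaceControllerCfg(
        command_types=["pose_abs"], impedance_mode="fixed", stiffness=100.0, damping_ratio=1.0
    )
    controller = OperationSpaceController(cfg, num_robots=num_robots, num_dof=num_dof, device="cpu")
    # absolute pose command: position (x, y, z) + quaternion (w, x, y, z)
    command = torch.zeros(num_robots, controller.num_actions)
    command[:, 3] = 1.0  # identity orientation
    controller.set_command(command)
    # dummy robot state
    ee_pose = torch.zeros(num_robots, 7)
    ee_pose[:, 3] = 1.0
    ee_vel = torch.zeros(num_robots, 6)
    jacobian = torch.randn(num_robots, 6, num_dof)
    return controller.compute(jacobian, ee_pose=ee_pose, ee_vel=ee_vel)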
| 16,784 | Python | 44.611413 | 153 | 0.596938 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/differential_ik.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.orbit.utils.math import apply_delta_pose, compute_pose_error
if TYPE_CHECKING:
from .differential_ik_cfg import DifferentialIKControllerCfg
class DifferentialIKController:
r"""Differential inverse kinematics (IK) controller.
This controller is based on the concept of differential inverse kinematics [1, 2] which is a method for computing
the change in joint positions that yields the desired change in pose.
.. math::
\Delta \mathbf{q} = \mathbf{J}^{\dagger} \Delta \mathbf{x}
\mathbf{q}_{\text{desired}} = \mathbf{q}_{\text{current}} + \Delta \mathbf{q}
where :math:`\mathbf{J}^{\dagger}` is the pseudo-inverse of the Jacobian matrix :math:`\mathbf{J}`,
:math:`\Delta \mathbf{x}` is the desired change in pose, and :math:`\mathbf{q}_{\text{current}}`
is the current joint positions.
To deal with singularity in Jacobian, the following methods are supported for computing inverse of the Jacobian:
- "pinv": Moore-Penrose pseudo-inverse
- "svd": Adaptive singular-value decomposition (SVD)
- "trans": Transpose of matrix
- "dls": Damped version of Moore-Penrose pseudo-inverse (also called Levenberg-Marquardt)
.. caution::
The controller does not assume anything about the frames of the current and desired end-effector pose,
or the joint-space velocities. It is up to the user to ensure that these quantities are given
in the correct format.
Reference:
[1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf
[2] https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
"""
def __init__(self, cfg: DifferentialIKControllerCfg, num_envs: int, device: str):
"""Initialize the controller.
Args:
cfg: The configuration for the controller.
num_envs: The number of environments.
device: The device to use for computations.
"""
# store inputs
self.cfg = cfg
self.num_envs = num_envs
self._device = device
# create buffers
self.ee_pos_des = torch.zeros(self.num_envs, 3, device=self._device)
self.ee_quat_des = torch.zeros(self.num_envs, 4, device=self._device)
# -- input command
self._command = torch.zeros(self.num_envs, self.action_dim, device=self._device)
"""
Properties.
"""
@property
def action_dim(self) -> int:
"""Dimension of the controller's input command."""
if self.cfg.command_type == "position":
return 3 # (x, y, z)
elif self.cfg.command_type == "pose" and self.cfg.use_relative_mode:
return 6 # (dx, dy, dz, droll, dpitch, dyaw)
else:
return 7 # (x, y, z, qw, qx, qy, qz)
"""
Operations.
"""
    def reset(self, env_ids: torch.Tensor | None = None):
"""Reset the internals.
Args:
env_ids: The environment indices to reset. If None, then all environments are reset.
"""
pass
def set_command(
self, command: torch.Tensor, ee_pos: torch.Tensor | None = None, ee_quat: torch.Tensor | None = None
):
"""Set target end-effector pose command.
Based on the configured command type and relative mode, the method computes the desired end-effector pose.
        It is up to the user to ensure that the command is given in the correct frame. The relative
        mode is applied only when :attr:`DifferentialIKControllerCfg.use_relative_mode` is set, i.e.
        for the ``position_rel`` and ``pose_rel`` command variants.
Args:
command: The input command in shape (N, 3) or (N, 6) or (N, 7).
ee_pos: The current end-effector position in shape (N, 3).
This is only needed if the command type is ``position_rel`` or ``pose_rel``.
ee_quat: The current end-effector orientation (w, x, y, z) in shape (N, 4).
This is only needed if the command type is ``position_*`` or ``pose_rel``.
Raises:
ValueError: If the command type is ``position_*`` and :attr:`ee_quat` is None.
ValueError: If the command type is ``position_rel`` and :attr:`ee_pos` is None.
ValueError: If the command type is ``pose_rel`` and either :attr:`ee_pos` or :attr:`ee_quat` is None.
"""
# store command
self._command[:] = command
# compute the desired end-effector pose
if self.cfg.command_type == "position":
# we need end-effector orientation even though we are in position mode
# this is only needed for display purposes
if ee_quat is None:
raise ValueError("End-effector orientation can not be None for `position_*` command type!")
# compute targets
if self.cfg.use_relative_mode:
if ee_pos is None:
raise ValueError("End-effector position can not be None for `position_rel` command type!")
self.ee_pos_des[:] = ee_pos + self._command
self.ee_quat_des[:] = ee_quat
else:
self.ee_pos_des[:] = self._command
self.ee_quat_des[:] = ee_quat
else:
# compute targets
if self.cfg.use_relative_mode:
if ee_pos is None or ee_quat is None:
raise ValueError(
"Neither end-effector position nor orientation can be None for `pose_rel` command type!"
)
self.ee_pos_des, self.ee_quat_des = apply_delta_pose(ee_pos, ee_quat, self._command)
else:
self.ee_pos_des = self._command[:, 0:3]
self.ee_quat_des = self._command[:, 3:7]
def compute(
self, ee_pos: torch.Tensor, ee_quat: torch.Tensor, jacobian: torch.Tensor, joint_pos: torch.Tensor
) -> torch.Tensor:
"""Computes the target joint positions that will yield the desired end effector pose.
Args:
ee_pos: The current end-effector position in shape (N, 3).
ee_quat: The current end-effector orientation in shape (N, 4).
jacobian: The geometric jacobian matrix in shape (N, 6, num_joints).
joint_pos: The current joint positions in shape (N, num_joints).
Returns:
The target joint positions commands in shape (N, num_joints).
"""
# compute the delta in joint-space
if "position" in self.cfg.command_type:
position_error = self.ee_pos_des - ee_pos
jacobian_pos = jacobian[:, 0:3]
delta_joint_pos = self._compute_delta_joint_pos(delta_pose=position_error, jacobian=jacobian_pos)
else:
position_error, axis_angle_error = compute_pose_error(
ee_pos, ee_quat, self.ee_pos_des, self.ee_quat_des, rot_error_type="axis_angle"
)
pose_error = torch.cat((position_error, axis_angle_error), dim=1)
delta_joint_pos = self._compute_delta_joint_pos(delta_pose=pose_error, jacobian=jacobian)
# return the desired joint positions
return joint_pos + delta_joint_pos
"""
Helper functions.
"""
def _compute_delta_joint_pos(self, delta_pose: torch.Tensor, jacobian: torch.Tensor) -> torch.Tensor:
"""Computes the change in joint position that yields the desired change in pose.
The method uses the Jacobian mapping from joint-space velocities to end-effector velocities
to compute the delta-change in the joint-space that moves the robot closer to a desired
end-effector position.
Args:
delta_pose: The desired delta pose in shape (N, 3) or (N, 6).
jacobian: The geometric jacobian matrix in shape (N, 3, num_joints) or (N, 6, num_joints).
Returns:
            The desired delta in joint space. Shape is (N, num_joints).
"""
if self.cfg.ik_params is None:
raise RuntimeError(f"Inverse-kinematics parameters for method '{self.cfg.ik_method}' is not defined!")
# compute the delta in joint-space
if self.cfg.ik_method == "pinv": # Jacobian pseudo-inverse
# parameters
k_val = self.cfg.ik_params["k_val"]
# computation
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_joint_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_joint_pos = delta_joint_pos.squeeze(-1)
elif self.cfg.ik_method == "svd": # adaptive SVD
# parameters
k_val = self.cfg.ik_params["k_val"]
min_singular_value = self.cfg.ik_params["min_singular_value"]
# computation
            # shapes -- U: (N, 6, 6), S: (N, 6), Vh: (N, num_joints, num_joints)
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1.0 / S
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = (
torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6]
@ torch.diag_embed(S_inv)
@ torch.transpose(U, dim0=1, dim1=2)
)
delta_joint_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_joint_pos = delta_joint_pos.squeeze(-1)
elif self.cfg.ik_method == "trans": # Jacobian transpose
# parameters
k_val = self.cfg.ik_params["k_val"]
# computation
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_joint_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_joint_pos = delta_joint_pos.squeeze(-1)
elif self.cfg.ik_method == "dls": # damped least squares
# parameters
lambda_val = self.cfg.ik_params["lambda_val"]
# computation
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(n=jacobian.shape[1], device=self._device)
delta_joint_pos = (
jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
)
delta_joint_pos = delta_joint_pos.squeeze(-1)
else:
raise ValueError(f"Unsupported inverse-kinematics method: {self.cfg.ik_method}")
return delta_joint_pos
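# Hedged usage sketch (added for illustration): one IK step with random placeholder state standing
# in for values read from the simulated articulation.
def _example_diff_ik_step() -> torch.Tensor:
    from .differential_ik_cfg import DifferentialIKControllerCfg

    num_envs, num_joints = 4, 7
    cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
    controller = DifferentialIKController(cfg, num_envs=num_envs, device="cpu")
    # absolute pose command: position (x, y, z) + quaternion (w, x, y, z)
    command = torch.zeros(num_envs, controller.action_dim)
    command[:, 3] = 1.0
    controller.set_command(command)
    # dummy robot state
    ee_pos = torch.zeros(num_envs, 3)
    ee_quat = torch.zeros(num_envs, 4)
    ee_quat[:, 0] = 1.0
    jacobian = torch.randn(num_envs, 6, num_joints)
    joint_pos = torch.zeros(num_envs, num_joints)
    return controller.compute(ee_pos, ee_quat, jacobian, joint_pos)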
| 10,589 | Python | 44.06383 | 153 | 0.602134 |
fnuabhimanyu8713/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/controllers/differential_ik_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.utils import configclass
from .differential_ik import DifferentialIKController
@configclass
class DifferentialIKControllerCfg:
"""Configuration for differential inverse kinematics controller."""
class_type: type = DifferentialIKController
"""The associated controller class."""
command_type: Literal["position", "pose"] = MISSING
"""Type of task-space command to control the articulation's body.
If "position", then the controller only controls the position of the articulation's body.
Otherwise, the controller controls the pose of the articulation's body.
"""
use_relative_mode: bool = False
"""Whether to use relative mode for the controller. Defaults to False.
If True, then the controller treats the input command as a delta change in the position/pose.
Otherwise, the controller treats the input command as the absolute position/pose.
"""
ik_method: Literal["pinv", "svd", "trans", "dls"] = MISSING
"""Method for computing inverse of Jacobian."""
ik_params: dict[str, float] | None = None
"""Parameters for the inverse-kinematics method. Defaults to None, in which case the default
parameters for the method are used.
- Moore-Penrose pseudo-inverse ("pinv"):
- "k_val": Scaling of computed delta-joint positions (default: 1.0).
- Adaptive Singular Value Decomposition ("svd"):
- "k_val": Scaling of computed delta-joint positions (default: 1.0).
- "min_singular_value": Single values less than this are suppressed to zero (default: 1e-5).
- Jacobian transpose ("trans"):
- "k_val": Scaling of computed delta-joint positions (default: 1.0).
- Damped Moore-Penrose pseudo-inverse ("dls"):
- "lambda_val": Damping coefficient (default: 0.01).
"""
def __post_init__(self):
# check valid input
if self.command_type not in ["position", "pose"]:
raise ValueError(f"Unsupported inverse-kinematics command: {self.command_type}.")
if self.ik_method not in ["pinv", "svd", "trans", "dls"]:
raise ValueError(f"Unsupported inverse-kinematics method: {self.ik_method}.")
# default parameters for different inverse kinematics approaches.
default_ik_params = {
"pinv": {"k_val": 1.0},
"svd": {"k_val": 1.0, "min_singular_value": 1e-5},
"trans": {"k_val": 1.0},
"dls": {"lambda_val": 0.01},
}
# update parameters for IK-method if not provided
ik_params = default_ik_params[self.ik_method].copy()
if self.ik_params is not None:
ik_params.update(self.ik_params)
self.ik_params = ik_params
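# Hedged example (added for illustration): user-supplied parameters are merged over the method
# defaults in __post_init__, so this configuration resolves ``ik_params`` to
# {"k_val": 1.0, "min_singular_value": 1e-4}.
def _example_ik_cfg() -> DifferentialIKControllerCfg:
    return DifferentialIKControllerCfg(
        command_type="pose",
        ik_method="svd",
        ik_params={"min_singular_value": 1e-4},
    )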
| 2,893 | Python | 39.760563 | 100 | 0.664017 |