NVIDIA-Omniverse/orbit/source/standalone/tutorials/01_assets/run_rigid_object.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to create a rigid object and interact with it.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/01_assets/run_rigid_object.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with a rigid object.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sim import SimulationContext


def design_scene():
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin0", "Origin1", "Origin2", "Origin3"
    # Each group will have a cone in it
    origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)

    # Rigid Object
    cone_cfg = RigidObjectCfg(
        prim_path="/World/Origin.*/Cone",
        spawn=sim_utils.ConeCfg(
            radius=0.1,
            height=0.2,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(),
    )
    cone_object = RigidObject(cfg=cone_cfg)

    # return the scene information
    scene_entities = {"cone": cone_object}
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, RigidObject], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability. In general, it is better to access the entities directly from
    #   the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial.
    cone_object = entities["cone"]
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 250 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset root state
            root_state = cone_object.data.default_root_state.clone()
            # sample a random position on a cylinder around the origins
            root_state[:, :3] += origins
            root_state[:, :3] += math_utils.sample_cylinder(
                radius=0.1, h_range=(0.25, 0.5), size=cone_object.num_instances, device=cone_object.device
            )
            # write root state to simulation
            cone_object.write_root_state_to_sim(root_state)
            # reset buffers
            cone_object.reset()
            print("----------------------------------------")
            print("[INFO]: Resetting object state...")
        # apply sim data
        cone_object.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        cone_object.update(sim_dt)
        # print the root position
        if count % 50 == 0:
            print(f"Root position (in world): {cone_object.data.root_state_w[:, :3]}")


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg()
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[1.5, 0.0, 1.0], target=[0.0, 0.0, 0.0])
    # Design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/02_scene/create_scene.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils import configclass

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import CARTPOLE_CFG  # isort:skip


@configclass
class CartpoleSceneCfg(InteractiveSceneCfg):
    """Configuration for a cart-pole scene."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # articulation
    cartpole: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability.
    robot = scene["cartpole"]
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = robot.data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            robot.write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
            joint_pos += torch.rand_like(joint_pos) * 0.1
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply random action
        # -- generate random joint efforts
        efforts = torch.randn_like(robot.data.joint_pos) * 5.0
        # -- apply action to the robot
        robot.set_joint_effort_target(efforts)
        # -- write data to sim
        scene.write_data_to_sim()
        # Perform step
        sim.step()
        # Increment counter
        count += 1
        # Update buffers
        scene.update(sim_dt)


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
    # Design scene
    scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/create_cartpole_base_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to create a simple environment with a cartpole. It combines the concepts of
scene, action, observation and event managers to create an environment.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a cartpole base environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import math
import torch

import omni.isaac.orbit.envs.mdp as mdp
from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.classic.cartpole.cartpole_env_cfg import CartpoleSceneCfg


@configclass
class ActionsCfg:
    """Action specifications for the environment."""

    joint_efforts = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=5.0)


@configclass
class ObservationsCfg:
    """Observation specifications for the environment."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel)

        def __post_init__(self) -> None:
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    # on startup
    add_pole_mass = EventTerm(
        func=mdp.add_body_mass,
        mode="startup",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names=["pole"]),
            "mass_range": (0.1, 0.5),
        },
    )

    # on reset
    reset_cart_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]),
            "position_range": (-1.0, 1.0),
            "velocity_range": (-0.1, 0.1),
        },
    )

    reset_pole_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]),
            "position_range": (-0.125 * math.pi, 0.125 * math.pi),
            "velocity_range": (-0.01 * math.pi, 0.01 * math.pi),
        },
    )


@configclass
class CartpoleEnvCfg(BaseEnvCfg):
    """Configuration for the cartpole environment."""

    # Scene settings
    scene = CartpoleSceneCfg(num_envs=1024, env_spacing=2.5)
    # Basic settings
    observations = ObservationsCfg()
    actions = ActionsCfg()
    events = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # viewer settings
        self.viewer.eye = [4.5, 0.0, 6.0]
        self.viewer.lookat = [0.0, 0.0, 2.0]
        # step settings
        self.decimation = 4  # env step every 4 sim steps: 200Hz / 4 = 50Hz
        # simulation settings
        self.sim.dt = 0.005  # sim step every 5ms: 200Hz


def main():
    """Main function."""
    # parse the arguments
    env_cfg = CartpoleEnvCfg()
    env_cfg.scene.num_envs = args_cli.num_envs
    # setup base environment
    env = BaseEnv(cfg=env_cfg)

    # simulate physics
    count = 0
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # sample random actions
            joint_efforts = torch.randn_like(env.action_manager.action)
            # step the environment
            obs, _ = env.step(joint_efforts)
            # print current orientation of pole
            print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to run the RL environment for the cartpole balancing task.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on running the cartpole RL environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

from omni.isaac.orbit.envs import RLTaskEnv

from omni.isaac.orbit_tasks.classic.cartpole.cartpole_env_cfg import CartpoleEnvCfg


def main():
    """Main function."""
    # create environment configuration
    env_cfg = CartpoleEnvCfg()
    env_cfg.scene.num_envs = args_cli.num_envs
    # setup RL environment
    env = RLTaskEnv(cfg=env_cfg)

    # simulate physics
    count = 0
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # sample random actions
            joint_efforts = torch.randn_like(env.action_manager.action)
            # step the environment
            obs, rew, terminated, truncated, info = env.step(joint_efforts)
            # print current orientation of pole
            print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/create_cube_base_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script creates a simple environment with a floating cube. The cube is controlled by a PD
controller to track an arbitrary target position.

While going through this tutorial, we recommend you to pay attention to how a custom action term
is defined. The action term is responsible for processing the raw actions and applying them to the
scene entities. The rest of the environment is similar to the previous tutorials.

.. code-block:: bash

    # Run the script
    ./orbit.sh -p source/standalone/tutorials/03_envs/create_cube_base_env.py --num_envs 32

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a floating cube environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.envs.mdp as mdp
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg, RigidObject, RigidObjectCfg
from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
from omni.isaac.orbit.managers import ActionTerm, ActionTermCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass

##
# Custom action term
##


class CubeActionTerm(ActionTerm):
    """Simple action term that implements a PD controller to track a target position.

    The action term is applied to the cube asset. It involves two steps:

    1. **Process the raw actions**: Typically, this includes any transformations of the raw actions
       that are required to map them to the desired space. This is called once per environment step.
    2. **Apply the processed actions**: This step applies the processed actions to the asset.
       It is called once per simulation step.

    In this case, the action term simply applies the raw actions to the cube asset. The raw actions
    are the desired target positions of the cube in the environment frame. The pre-processing step
    simply copies the raw actions to the processed actions as no additional processing is required.
    The processed actions are then applied to the cube asset by implementing a PD controller to
    track the target position.
    """

    _asset: RigidObject
    """The rigid object asset on which the action term is applied."""

    def __init__(self, cfg: CubeActionTermCfg, env: BaseEnv):
        # call super constructor
        super().__init__(cfg, env)
        # create buffers
        self._raw_actions = torch.zeros(env.num_envs, 3, device=self.device)
        self._processed_actions = torch.zeros(env.num_envs, 3, device=self.device)
        self._vel_command = torch.zeros(self.num_envs, 6, device=self.device)
        # gains of controller
        self.p_gain = cfg.p_gain
        self.d_gain = cfg.d_gain

    """
    Properties.
    """

    @property
    def action_dim(self) -> int:
        return self._raw_actions.shape[1]

    @property
    def raw_actions(self) -> torch.Tensor:
        return self._raw_actions

    @property
    def processed_actions(self) -> torch.Tensor:
        return self._processed_actions

    """
    Operations
    """

    def process_actions(self, actions: torch.Tensor):
        # store the raw actions
        self._raw_actions[:] = actions
        # no-processing of actions
        self._processed_actions[:] = self._raw_actions[:]

    def apply_actions(self):
        # implement a PD controller to track the target position
        pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins)
        vel_error = -self._asset.data.root_lin_vel_w
        # set velocity targets
        self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error
        self._asset.write_root_velocity_to_sim(self._vel_command)


@configclass
class CubeActionTermCfg(ActionTermCfg):
    """Configuration for the cube action term."""

    class_type: type = CubeActionTerm
    """The class corresponding to the action term."""

    p_gain: float = 5.0
    """Proportional gain of the PD controller."""
    d_gain: float = 0.5
    """Derivative gain of the PD controller."""


##
# Custom observation term
##


def base_position(env: BaseEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """Root position of the asset in the environment frame."""
    # extract the used quantities (to enable type-hinting)
    asset: RigidObject = env.scene[asset_cfg.name]
    return asset.data.root_pos_w - env.scene.env_origins


##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Example scene configuration.

    The scene comprises a ground plane, light source and floating cubes (gravity disabled).
    """

    # add terrain
    terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False)

    # add cube
    cube: RigidObjectCfg = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/cube",
        spawn=sim_utils.CuboidCfg(
            size=(0.2, 0.2, 0.2),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0, disable_gravity=True),
            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
            physics_material=sim_utils.RigidBodyMaterialCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5)),
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# Environment settings
##


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = CubeActionTermCfg(asset_name="cube")


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # cube position
        position = ObsTerm(func=base_position, params={"asset_cfg": SceneEntityCfg("cube")})

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_base = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (-0.5, 0.5),
                "y": (-0.5, 0.5),
                "z": (-0.5, 0.5),
            },
            "asset_cfg": SceneEntityCfg("cube"),
        },
    )


##
# Environment configuration
##


@configclass
class CubeEnvCfg(BaseEnvCfg):
    """Configuration for the floating cube environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    events: EventCfg = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2
        # simulation settings
        self.sim.dt = 0.01
        self.sim.physics_material = self.scene.terrain.physics_material


def main():
    """Main function."""
    # setup base environment
    env = BaseEnv(cfg=CubeEnvCfg())

    # setup target position commands
    target_position = torch.rand(env.num_envs, 3, device=env.device) * 2
    target_position[:, 2] += 2.0
    # offset all targets so that they move to the world origin
    target_position -= env.scene.env_origins

    # simulate physics
    count = 0
    obs, _ = env.reset()
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                obs, _ = env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # step env
            obs, _ = env.step(target_position)
            # print the mean position error between the target and current positions
            error = torch.norm(obs["policy"] - target_position, dim=-1).mean().item()
            print(f"[Step: {count:04d}]: Mean position error: {error:.4f}")
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/create_quadruped_base_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates the environment for a quadruped robot with height-scan sensor.

In this example, we use a locomotion policy to control the robot. The robot is commanded to
move forward at a constant velocity. The height-scan sensor is used to detect the height of
the terrain.

.. code-block:: bash

    # Run the script
    ./orbit.sh -p source/standalone/tutorials/03_envs/create_quadruped_base_env.py --num_envs 32

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a quadruped base environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import torch

import omni.isaac.orbit.envs.mdp as mdp
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import RayCasterCfg, patterns
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR, check_file_path, read_file
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise

##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG  # isort: skip
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort: skip


##
# Custom observation terms
##


def constant_commands(env: BaseEnv) -> torch.Tensor:
    """A constant forward-velocity command for the robot."""
    return torch.tensor([[1, 0, 0]], device=env.device).repeat(env.num_envs, 1)


##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Example scene configuration."""

    # add terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="generator",
        terrain_generator=ROUGH_TERRAINS_CFG,
        max_init_terrain_level=5,
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="multiply",
            restitution_combine_mode="multiply",
            static_friction=1.0,
            dynamic_friction=1.0,
        ),
        debug_vis=False,
    )

    # add robot
    robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # sensors
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/ground"],
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
        projected_gravity = ObsTerm(
            func=mdp.projected_gravity,
            noise=Unoise(n_min=-0.05, n_max=0.05),
        )
        velocity_commands = ObsTerm(func=constant_commands)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
        actions = ObsTerm(func=mdp.last_action)
        height_scan = ObsTerm(
            func=mdp.height_scan,
            params={"sensor_cfg": SceneEntityCfg("height_scanner")},
            noise=Unoise(n_min=-0.1, n_max=0.1),
            clip=(-1.0, 1.0),
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_scene = EventTerm(func=mdp.reset_scene_to_default, mode="reset")


##
# Environment configuration
##


@configclass
class QuadrupedEnvCfg(BaseEnvCfg):
    """Configuration for the locomotion velocity-tracking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    events: EventCfg = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 4  # env decimation -> 50 Hz control
        # simulation settings
        self.sim.dt = 0.005  # simulation timestep -> 200 Hz physics
        self.sim.physics_material = self.scene.terrain.physics_material
        # update sensor update periods
        # we tick all the sensors based on the smallest update period (physics update period)
        if self.scene.height_scanner is not None:
            self.scene.height_scanner.update_period = self.decimation * self.sim.dt  # 50 Hz


def main():
    """Main function."""
    # setup base environment
    env_cfg = QuadrupedEnvCfg()
    env = BaseEnv(cfg=env_cfg)

    # load level policy
    policy_path = os.path.join(ISAAC_ORBIT_NUCLEUS_DIR, "Policies", "ANYmal-C", "policy.pt")
    # check if policy file exists
    if not check_file_path(policy_path):
        raise FileNotFoundError(f"Policy file '{policy_path}' does not exist.")
    file_bytes = read_file(policy_path)
    # jit load the policy
    policy = torch.jit.load(file_bytes).to(env.device).eval()

    # simulate physics
    count = 0
    obs, _ = env.reset()
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 1000 == 0:
                obs, _ = env.reset()
                count = 0
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # infer action
            action = policy(obs["policy"])
            # step env
            obs, _ = env.step(action)
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/05_controllers/run_diff_ik.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the differential inverse kinematics controller with the simulator.

The differential IK controller can be configured in different modes. It uses the Jacobians computed by
PhysX. This helps perform parallelized computation of the inverse kinematics.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/05_controllers/run_diff_ik.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the differential IK controller.")
parser.add_argument("--robot", type=str, default="franka_panda", help="Name of the robot.")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import subtract_frame_transforms

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG  # isort:skip


@configclass
class TableTopSceneCfg(InteractiveSceneCfg):
    """Configuration for a table-top scene."""

    # ground plane
    ground = AssetBaseCfg(
        prim_path="/World/defaultGroundPlane",
        spawn=sim_utils.GroundPlaneCfg(),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)),
    )

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # mount
    table = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Table",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
        ),
    )

    # articulation
    if args_cli.robot == "franka_panda":
        robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    elif args_cli.robot == "ur10":
        robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability.
    robot = scene["robot"]

    # Create controller
    diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
    diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)

    # Markers
    frame_marker_cfg = FRAME_MARKER_CFG.copy()
    frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
    ee_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_current"))
    goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal"))

    # Define goals for the arm
    ee_goals = [
        [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
        [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
        [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
    ]
    ee_goals = torch.tensor(ee_goals, device=sim.device)
    # Track the given command
    current_goal_idx = 0
    # Create buffers to store actions
    ik_commands = torch.zeros(scene.num_envs, diff_ik_controller.action_dim, device=robot.device)
    ik_commands[:] = ee_goals[current_goal_idx]

    # Specify robot-specific parameters
    if args_cli.robot == "franka_panda":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=["panda_joint.*"], body_names=["panda_hand"])
    elif args_cli.robot == "ur10":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=[".*"], body_names=["ee_link"])
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")
    # Resolving the scene entities
    robot_entity_cfg.resolve(scene)
    # Obtain the frame index of the end-effector
    # For a fixed base robot, the frame index is one less than the body index. This is because
    # the root body is not included in the returned Jacobians.
    if robot.is_fixed_base:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0] - 1
    else:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0]

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # reset
        if count % 150 == 0:
            # reset time
            count = 0
            # reset joint state
            joint_pos = robot.data.default_joint_pos.clone()
            joint_vel = robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            robot.reset()
            # reset actions
            ik_commands[:] = ee_goals[current_goal_idx]
            joint_pos_des = joint_pos[:, robot_entity_cfg.joint_ids].clone()
            # reset controller
            diff_ik_controller.reset()
            diff_ik_controller.set_command(ik_commands)
            # change goal
            current_goal_idx = (current_goal_idx + 1) % len(ee_goals)
        else:
            # obtain quantities from simulation
            jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids]
            ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
            root_pose_w = robot.data.root_state_w[:, 0:7]
            joint_pos = robot.data.joint_pos[:, robot_entity_cfg.joint_ids]
            # compute frame in root frame
            ee_pos_b, ee_quat_b = subtract_frame_transforms(
                root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
            )
            # compute the joint commands
            joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)

        # apply actions
        robot.set_joint_position_target(joint_pos_des, joint_ids=robot_entity_cfg.joint_ids)
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        count += 1
        # update buffers
        scene.update(sim_dt)

        # obtain quantities from simulation
        ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
        # update marker positions
        ee_marker.visualize(ee_pose_w[:, 0:3], ee_pose_w[:, 3:7])
        goal_marker.visualize(ik_commands[:, 0:3] + scene.env_origins, ik_commands[:, 3:7])


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(dt=0.01)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # Design scene
    scene_cfg = TableTopSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_usd_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the camera sensor from the Orbit framework.

The camera sensor is created and interfaced through the Omniverse Replicator API. However, instead of using
the simulator or OpenGL convention for the camera, we use the robotics or ROS convention.

.. code-block:: bash

    # Usage with GUI
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py

    # Usage with headless
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --headless --offscreen_render

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the camera sensor.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU device for camera output.")
parser.add_argument(
    "--draw",
    action="store_true",
    default=False,
    help="Draw the pointcloud from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--save",
    action="store_true",
    default=False,
    help="Save the data from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--camera_id",
    type=int,
    choices={0, 1},
    default=0,
    help=(
        "The camera ID to use for displaying points or saving the camera data. Default is 0."
        " The viewport will always initialize with the perspective of camera 0."
    ),
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import os
import random
import torch

import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import RAY_CASTER_MARKER_CFG
from omni.isaac.orbit.sensors.camera import Camera, CameraCfg
from omni.isaac.orbit.sensors.camera.utils import create_pointcloud_from_depth
from omni.isaac.orbit.utils import convert_dict_to_backend


def define_sensor() -> Camera:
    """Defines the camera sensor to add to the scene."""
    # Setup camera sensor
    # In contrast to the ray-cast camera, we spawn the prim at these locations.
    # This means the camera sensor will be attached to these prims.
    prim_utils.create_prim("/World/Origin_00", "Xform")
    prim_utils.create_prim("/World/Origin_01", "Xform")
    camera_cfg = CameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        update_period=0,
        height=480,
        width=640,
        data_types=[
            "rgb",
            "distance_to_image_plane",
            "normals",
            "semantic_segmentation",
            "instance_segmentation_fast",
            "instance_id_segmentation_fast",
        ],
        colorize_semantic_segmentation=True,
        colorize_instance_id_segmentation=True,
        colorize_instance_segmentation=True,
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
    )
    # Create camera
    camera = Camera(cfg=camera_cfg)

    return camera


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create a dictionary for the scene entities
    scene_entities = {}

    # Xform to hold objects
    prim_utils.create_prim("/World/Objects", "Xform")
    # Random objects
    for i in range(8):
        # sample random position
        position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
        position *= np.asarray([1.5, 1.5, 0.5])
        # sample random color
        color = (random.random(), random.random(), random.random())
        # choose random prim type
        prim_type = random.choice(["Cube", "Cone", "Cylinder"])
        common_properties = {
            "rigid_props": sim_utils.RigidBodyPropertiesCfg(),
            "mass_props": sim_utils.MassPropertiesCfg(mass=5.0),
            "collision_props": sim_utils.CollisionPropertiesCfg(),
            "visual_material": sim_utils.PreviewSurfaceCfg(diffuse_color=color, metallic=0.5),
            "semantic_tags": [("class", prim_type)],
        }
        if prim_type == "Cube":
            shape_cfg = sim_utils.CuboidCfg(size=(0.25, 0.25, 0.25), **common_properties)
        elif prim_type == "Cone":
            shape_cfg = sim_utils.ConeCfg(radius=0.1, height=0.25, **common_properties)
        elif prim_type == "Cylinder":
            shape_cfg = sim_utils.CylinderCfg(radius=0.25, height=0.25, **common_properties)
        # Rigid Object
        obj_cfg = RigidObjectCfg(
            prim_path=f"/World/Objects/Obj_{i:02d}",
            spawn=shape_cfg,
            init_state=RigidObjectCfg.InitialStateCfg(pos=position),
        )
        scene_entities[f"rigid_object{i}"] = RigidObject(cfg=obj_cfg)

    # Sensors
    camera = define_sensor()

    # return the scene information
    scene_entities["camera"] = camera
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # extract entities for simplified notation
    camera: Camera = scene_entities["camera"]

    # Create replicator writer
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera")
    rep_writer = rep.BasicWriter(
        output_dir=output_dir,
        frame_padding=0,
        colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation,
        colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation,
        colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation,
    )

    # Camera positions, targets, orientations
    camera_positions = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    camera_targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    # These orientations are in ROS-convention, and will position the cameras to view the origin
    camera_orientations = torch.tensor(  # noqa: F841
        [[-0.1759, 0.3399, 0.8205, -0.4247], [-0.4247, 0.8205, -0.3399, 0.1759]], device=sim.device
    )

    # Set pose: There are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    camera.set_world_poses_from_view(camera_positions, camera_targets)
    # -- Option-2: Set pose using ROS
    # camera.set_world_poses(camera_positions, camera_orientations, convention="ros")

    # Index of the camera to use for visualization and saving
    camera_index = args_cli.camera_id

    # Create the markers for the --draw option outside of is_running() loop
    if sim.has_gui() and args_cli.draw:
        cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/CameraPointCloud")
        cfg.markers["hit"].radius = 0.002
        pc_markers = VisualizationMarkers(cfg)

    # Simulate physics
    while simulation_app.is_running():
        # Step simulation
        sim.step()
        # Update camera data
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        if "rgb" in camera.data.output.keys():
            print("Received shape of rgb image        : ", camera.data.output["rgb"].shape)
        if "distance_to_image_plane" in camera.data.output.keys():
            print("Received shape of depth image      : ", camera.data.output["distance_to_image_plane"].shape)
        if "normals" in camera.data.output.keys():
            print("Received shape of normals          : ", camera.data.output["normals"].shape)
        if "semantic_segmentation" in camera.data.output.keys():
            print("Received shape of semantic segm.   : ", camera.data.output["semantic_segmentation"].shape)
        if "instance_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance segm.   : ", camera.data.output["instance_segmentation_fast"].shape)
        if "instance_id_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance id segm.: ", camera.data.output["instance_id_segmentation_fast"].shape)
        print("-------------------------------")

        # Extract camera data
        if args_cli.save:
            # Save images from camera at camera_index
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            # tensordict allows easy indexing of tensors in the dictionary
            single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")

            # Extract the other information
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save them using its writer
            rep_output = dict()
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output[key] = {"data": data, "info": info}
                else:
                    rep_output[key] = data
            # Save images
            # Note: We need to provide On-time data for Replicator to save the images.
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

        # Draw pointcloud if there is a GUI and --draw has been passed
        if sim.has_gui() and args_cli.draw and "distance_to_image_plane" in camera.data.output.keys():
            # Derive pointcloud from camera at camera_index
            pointcloud = create_pointcloud_from_depth(
                intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
                depth=camera.data.output[camera_index]["distance_to_image_plane"],
                position=camera.data.pos_w[camera_index],
                orientation=camera.data.quat_w_ros[camera_index],
                device=sim.device,
            )

            # In the first few steps, things are still being instanced and Camera.data
            # can be empty. If we attempt to visualize an empty pointcloud it will crash
            # the sim, so we check that the pointcloud is not empty.
            if pointcloud.size()[0] > 0:
                pc_markers.visualize(translations=pointcloud)


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg(device="cpu" if args_cli.cpu else "cuda")
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim, scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_frame_transformer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates the FrameTransformer sensor by visualizing the frames that it creates.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_frame_transformer.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(
    description="This script checks the FrameTransformer sensor by visualizing the frames that it creates."
)
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything follows."""

import math
import torch

import omni.isaac.debug_draw._debug_draw as omni_debug_draw

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG
from omni.isaac.orbit.sensors import FrameTransformer, FrameTransformerCfg, OffsetCfg
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort:skip


def define_sensor() -> FrameTransformer:
    """Defines the FrameTransformer sensor to add to the scene."""
    # define offset
    rot_offset = math_utils.quat_from_euler_xyz(torch.zeros(1), torch.zeros(1), torch.tensor(-math.pi / 2))
    pos_offset = math_utils.quat_apply(rot_offset, torch.tensor([0.08795, 0.01305, -0.33797]))

    # Example using .* to get full body + LF_FOOT
    frame_transformer_cfg = FrameTransformerCfg(
        prim_path="/World/Robot/base",
        target_frames=[
            FrameTransformerCfg.FrameCfg(prim_path="/World/Robot/.*"),
            FrameTransformerCfg.FrameCfg(
                prim_path="/World/Robot/LF_SHANK",
                name="LF_FOOT_USER",
                offset=OffsetCfg(pos=tuple(pos_offset.tolist()), rot=tuple(rot_offset[0].tolist())),
            ),
        ],
        debug_vis=False,
    )
    frame_transformer = FrameTransformer(frame_transformer_cfg)

    return frame_transformer


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)
    # -- Robot
    robot = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Robot"))
    # -- Sensors
    frame_transformer = define_sensor()

    # return the scene information
    scene_entities = {"robot": robot, "frame_transformer": frame_transformer}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # extract entities for simplified notation
    robot: Articulation = scene_entities["robot"]
    frame_transformer: FrameTransformer = scene_entities["frame_transformer"]

    # We only want one visualization at a time. This visualizer will be used
    # to step through each frame so the user can verify that the correct frame
    # is being visualized as the frame names are printed to the console.
    if not args_cli.headless:
        cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript")
        cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
        transform_visualizer = VisualizationMarkers(cfg)
        # debug drawing for lines connecting the frame
        draw_interface = omni_debug_draw.acquire_debug_draw_interface()
    else:
        transform_visualizer = None
        draw_interface = None

    frame_index = 0
    # Simulate physics
    while simulation_app.is_running():
        # perform this loop at policy control freq (50 Hz)
        robot.set_joint_position_target(robot.data.default_joint_pos.clone())
        robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # read data from sim
        robot.update(sim_dt)
        frame_transformer.update(dt=sim_dt)

        # Change the frame that we are visualizing to ensure that frame names
        # are correctly associated with the frames
        if not args_cli.headless:
            if count % 50 == 0:
                # get frame names
                frame_names = frame_transformer.data.target_frame_names
                print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}")
                # increment frame index
                frame_index += 1
                frame_index = frame_index % len(frame_names)

            # visualize frame
            source_pos = frame_transformer.data.source_pos_w
            source_quat = frame_transformer.data.source_quat_w
            target_pos = frame_transformer.data.target_pos_w[:, frame_index]
            target_quat = frame_transformer.data.target_quat_w[:, frame_index]
            # draw the frames
            transform_visualizer.visualize(
                torch.cat([source_pos, target_pos], dim=0), torch.cat([source_quat, target_quat], dim=0)
            )

            # draw the line connecting the frames
            draw_interface.clear_lines()
            # plain color for lines
            lines_colors = [[1.0, 1.0, 0.0, 1.0]] * source_pos.shape[0]
            line_thicknesses = [5.0] * source_pos.shape[0]
            draw_interface.draw_lines(source_pos.tolist(), target_pos.tolist(), lines_colors, line_thicknesses)


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
    # Set main camera
    sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
    # Design the scene
    scene_entities = design_scene()
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities)


if __name__ == "__main__":
    # Run the main function
    main()
    # Close the simulator
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_ray_caster_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the ray-cast camera sensor from the Orbit framework.

The camera sensor is based on using Warp kernels which do ray-casting against static meshes.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster_camera.py

"""

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the ray-cast camera sensor.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to generate.")
parser.add_argument("--save", action="store_true", default=False, help="Save the obtained data to disk.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import torch

import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns
from omni.isaac.orbit.utils import convert_dict_to_backend
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import project_points, unproject_depth


def define_sensor() -> RayCasterCamera:
    """Defines the ray-cast camera sensor to add to the scene."""
    # Camera base frames
    # In contrast to the USD camera, we associate the sensor to the prims at these locations.
    # This means that the parent prim of the sensor is the prim at this location.
    prim_utils.create_prim("/World/Origin_00/CameraSensor", "Xform")
    prim_utils.create_prim("/World/Origin_01/CameraSensor", "Xform")

    # Setup camera sensor
    camera_cfg = RayCasterCameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        mesh_prim_paths=["/World/ground"],
        update_period=0.1,
        offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)),
        data_types=["distance_to_image_plane", "normals", "distance_to_camera"],
        debug_vis=True,
        pattern_cfg=patterns.PinholeCameraPatternCfg(
            focal_length=24.0,
            horizontal_aperture=20.955,
            height=480,
            width=640,
        ),
    )
    # Create camera
    camera = RayCasterCamera(cfg=camera_cfg)

    return camera


def design_scene():
    # Populate scene
    # -- Rough terrain
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    cfg.func("/World/ground", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=600.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)
    # -- Sensors
    camera = define_sensor()

    # return the scene information
    scene_entities = {"camera": camera}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # extract entities for simplified notation
    camera: RayCasterCamera = scene_entities["camera"]

    # Create replicator writer
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "ray_caster_camera")
    rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)

    # Set pose: There are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    camera.set_world_poses_from_view(eyes, targets)
    # -- Option-2: Set pose using ROS
    # position = torch.tensor([[2.5, 2.5, 2.5]], device=sim.device)
    # orientation = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=sim.device)
    # camera.set_world_poses(position, orientation, indices=[0], convention="ros")

    # Simulate physics
    while simulation_app.is_running():
        # Step simulation
        sim.step()
        # Update camera data
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        print("Received shape of depth image: ", camera.data.output["distance_to_image_plane"].shape)
        print("-------------------------------")

        # Extract camera data
        if args_cli.save:
            # Extract camera data
            camera_index = 0
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            if sim.backend == "torch":
                # tensordict allows easy indexing of tensors in the dictionary
                single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")
            else:
                # for numpy, we need to manually index the data
                single_cam_data = dict()
                for key, value in camera.data.output.items():
                    single_cam_data[key] = value[camera_index]
            # Extract the other information
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save them using its writer
            rep_output = dict()
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output[key] = {"data": data, "info": info}
                else:
                    rep_output[key] = data
            # Save images
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

            # Pointcloud in world frame
            points_3d_cam = unproject_depth(
                camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
            )

            # Check methods are valid
            im_height, im_width = camera.image_shape
            # -- project points to (u, v, d)
            reproj_points = project_points(points_3d_cam, camera.data.intrinsic_matrices)
            reproj_depths = reproj_points[..., -1].view(-1, im_width, im_height).transpose_(1, 2)
            sim_depths = camera.data.output["distance_to_image_plane"].squeeze(-1)
            torch.testing.assert_close(reproj_depths, sim_depths)


def main():
    """Main function."""
    # Load kit helper
    sim = sim_utils.SimulationContext()
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 3.5], [0.0, 0.0, 0.0])
    # design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,126
Python
36.708995
115
0.642015
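The `PinholeCameraPatternCfg` values in the record above (focal length, aperture, resolution) fully determine the camera intrinsics used for unprojection. A minimal sketch of that mapping, assuming square pixels and a centered principal point (the library's own pattern code may differ):

.. code-block:: python

    import torch

    def make_intrinsic_matrix(focal_length: float, horizontal_aperture: float, width: int, height: int) -> torch.Tensor:
        """Build a 3x3 pinhole intrinsic matrix from USD-style camera parameters."""
        # focal length in pixels: physical focal length scaled by pixels per aperture unit
        f_x = width * focal_length / horizontal_aperture
        f_y = f_x  # assumed square pixels
        # assumed principal point at the image center
        c_x, c_y = width / 2.0, height / 2.0
        return torch.tensor([[f_x, 0.0, c_x], [0.0, f_y, c_y], [0.0, 0.0, 1.0]])

    # values from the sensor config above: 24.0 focal length, 20.955 aperture, 640x480
    print(make_intrinsic_matrix(24.0, 20.955, 640, 480))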
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to add and simulate on-board sensors for a robot.

We add the following sensors on the quadruped robot, ANYmal-C (ANYbotics):

* USD-Camera: This is a camera sensor that is attached to the robot's base.
* Height Scanner: This is a height scanner sensor that is attached to the robot's base.
* Contact Sensor: This is a contact sensor that is attached to the robot's feet.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort: skip


@configclass
class SensorsSceneCfg(InteractiveSceneCfg):
    """Design the scene with sensors on the robot."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # sensors
    camera = CameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb", "distance_to_image_plane"],
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
        offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        update_period=0.02,
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/defaultGroundPlane"],
    )
    contact_forces = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT", update_period=0.0, history_length=6, debug_vis=True
    )


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = scene["robot"].data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            scene["robot"].write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = (
                scene["robot"].data.default_joint_pos.clone(),
                scene["robot"].data.default_joint_vel.clone(),
            )
            joint_pos += torch.rand_like(joint_pos) * 0.1
            scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply default actions to the robot
        # -- generate actions/commands
        targets = scene["robot"].data.default_joint_pos
        # -- apply action to the robot
        scene["robot"].set_joint_position_target(targets)
        # -- write data to sim
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        scene.update(sim_dt)

        # print information from the sensors
        print("-------------------------------")
        print(scene["camera"])
        print("Received shape of rgb image: ", scene["camera"].data.output["rgb"].shape)
        print("Received shape of depth image: ", scene["camera"].data.output["distance_to_image_plane"].shape)
        print("-------------------------------")
        print(scene["height_scanner"])
        print("Received max height value: ", torch.max(scene["height_scanner"].data.ray_hits_w[..., -1]).item())
        print("-------------------------------")
        print(scene["contact_forces"])
        print("Received max contact force of: ", torch.max(scene["contact_forces"].data.net_forces_w).item())


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.005, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
6,365
Python
33.978022
112
0.635192
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_ray_caster.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the ray-caster sensor.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Ray Caster Test Script")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.timer import Timer


def define_sensor() -> RayCaster:
    """Defines the ray-caster sensor to add to the scene."""
    # Create a ray-caster sensor
    ray_caster_cfg = RayCasterCfg(
        prim_path="/World/Origin.*/ball",
        mesh_prim_paths=["/World/ground"],
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(2.0, 2.0)),
        attach_yaw_only=True,
        debug_vis=not args_cli.headless,
    )
    ray_caster = RayCaster(cfg=ray_caster_cfg)

    return ray_caster


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Rough terrain
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    cfg.func("/World/ground", cfg)
    # -- Light
    cfg = sim_utils.DistantLightCfg(intensity=2000)
    cfg.func("/World/light", cfg)

    # Create separate groups called "Origin0", "Origin1", ..., "Origin3"
    # Each group will have a ball in it
    origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)
    # -- Balls
    cfg = RigidObjectCfg(
        prim_path="/World/Origin.*/ball",
        spawn=sim_utils.SphereCfg(
            radius=0.25,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
        ),
    )
    balls = RigidObject(cfg)
    # -- Sensors
    ray_caster = define_sensor()

    # return the scene information
    scene_entities = {"balls": balls, "ray_caster": ray_caster}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # Extract scene_entities for simplified notation
    ray_caster: RayCaster = scene_entities["ray_caster"]
    balls: RigidObject = scene_entities["balls"]

    # define an initial position for the balls
    ball_default_state = balls.data.default_root_state.clone()
    ball_default_state[:, :3] = torch.rand_like(ball_default_state[:, :3]) * 10

    # Create a counter for resetting the scene
    step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # Reset the scene
        if step_count % 250 == 0:
            # reset the balls
            balls.write_root_state_to_sim(ball_default_state)
            # reset the sensor
            ray_caster.reset()
            # reset the counter
            step_count = 0
        # Step simulation
        sim.step()
        # Update the ray-caster
        with Timer(
            f"Ray-caster update with {4} x {ray_caster.num_rays} rays with max height of"
            f" {torch.max(ray_caster.data.pos_w).item():.2f}"
        ):
            ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
        # Update counter
        step_count += 1


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg()
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([0.0, 15.0, 15.0], [0.0, 0.0, -2.5])
    # Design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,764
Python
30.143791
101
0.649664
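A hedged sketch of what a grid pattern such as `GridPatternCfg(resolution=0.1, size=(2.0, 2.0))` amounts to: a regular lattice of ray start points with rays cast along -z. This mirrors the idea only; the library's pattern functions remain the authoritative implementation.

.. code-block:: python

    import torch

    resolution, size = 0.1, (2.0, 2.0)
    num_x = int(size[0] / resolution) + 1
    num_y = int(size[1] / resolution) + 1
    x = torch.linspace(-size[0] / 2, size[0] / 2, num_x)
    y = torch.linspace(-size[1] / 2, size[1] / 2, num_y)
    grid_x, grid_y = torch.meshgrid(x, y, indexing="xy")
    # one ray start per lattice point, all rays pointing straight down
    ray_starts = torch.stack([grid_x.flatten(), grid_y.flatten(), torch.zeros_like(grid_x.flatten())], dim=1)
    ray_directions = torch.tensor([0.0, 0.0, -1.0]).expand_as(ray_starts)
    print(ray_starts.shape)  # torch.Size([441, 3]) -> 21 x 21 rays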
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/launch_app.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to run Isaac Sim via the AppLauncher.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on running IsaacSim via the AppLauncher.")
parser.add_argument("--size", type=float, default=1.0, help="Side-length of cuboid")
# SimulationApp arguments https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.kit/docs/index.html?highlight=simulationapp#omni.isaac.kit.SimulationApp
parser.add_argument(
    "--width", type=int, default=1280, help="Width of the viewport and generated images. Defaults to 1280"
)
parser.add_argument(
    "--height", type=int, default=720, help="Height of the viewport and generated images. Defaults to 720"
)

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils


def design_scene():
    """Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
    # Ground-plane
    cfg_ground = sim_utils.GroundPlaneCfg()
    cfg_ground.func("/World/defaultGroundPlane", cfg_ground)

    # spawn distant light
    cfg_light_distant = sim_utils.DistantLightCfg(
        intensity=3000.0,
        color=(0.75, 0.75, 0.75),
    )
    cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))

    # spawn a cuboid
    cfg_cuboid = sim_utils.CuboidCfg(
        size=[args_cli.size] * 3,
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)),
    )
    # Spawn cuboid, altering translation on the z-axis to scale to its size
    cfg_cuboid.func("/World/Object", cfg_cuboid, translation=(0.0, 0.0, args_cli.size / 2))


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])

    # Design scene by adding assets to it
    design_scene()

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
2,854
Python
28.432989
173
0.689909
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/create_empty.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to create a simple stage in Isaac Sim.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/create_empty.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating an empty stage.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

from omni.isaac.orbit.sim import SimulationCfg, SimulationContext


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = SimulationCfg(dt=0.01, substeps=1)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
1,436
Python
22.177419
84
0.685933
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/spawn_prims.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to spawn prims into the scene.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/spawn_prims.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on spawning prims into the scene.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR


def design_scene():
    """Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
    # Ground-plane
    cfg_ground = sim_utils.GroundPlaneCfg()
    cfg_ground.func("/World/defaultGroundPlane", cfg_ground)

    # spawn distant light
    cfg_light_distant = sim_utils.DistantLightCfg(
        intensity=3000.0,
        color=(0.75, 0.75, 0.75),
    )
    cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))

    # create a new xform prim for all objects to be spawned under
    prim_utils.create_prim("/World/Objects", "Xform")
    # spawn a red cone
    cfg_cone = sim_utils.ConeCfg(
        radius=0.15,
        height=0.5,
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
    )
    cfg_cone.func("/World/Objects/Cone1", cfg_cone, translation=(-1.0, 1.0, 1.0))
    cfg_cone.func("/World/Objects/Cone2", cfg_cone, translation=(-1.0, -1.0, 1.0))

    # spawn a green cone with colliders and rigid body
    cfg_cone_rigid = sim_utils.ConeCfg(
        radius=0.15,
        height=0.5,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(),
        mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
        collision_props=sim_utils.CollisionPropertiesCfg(),
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
    )
    cfg_cone_rigid.func(
        "/World/Objects/ConeRigid", cfg_cone_rigid, translation=(0.0, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0)
    )

    # spawn a usd file of a table into the scene
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Objects/Table", cfg, translation=(0.0, 0.0, 1.05))


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])

    # Design scene by adding assets to it
    design_scene()

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
3,338
Python
29.354545
115
0.670761
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/log_time.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to generate log outputs while the simulation plays.
It accompanies the tutorial on docker usage.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/log_time.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse
import os

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating logs from within the docker container.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

from omni.isaac.orbit.sim import SimulationCfg, SimulationContext


def main():
    """Main function."""
    # Specify that the logs must be in logs/docker_tutorial
    log_dir_path = os.path.join("logs", "docker_tutorial")
    # In the container, the absolute path will be
    # /workspace/orbit/logs/docker_tutorial, because
    # all python execution is done through /workspace/orbit/orbit.sh
    # and the calling process' path will be /workspace/orbit
    log_dir_path = os.path.abspath(log_dir_path)
    if not os.path.isdir(log_dir_path):
        os.mkdir(log_dir_path)
    print(f"[INFO] Logging experiment to directory: {log_dir_path}")

    # Initialize the simulation context
    sim_cfg = SimulationCfg(dt=0.01, substeps=1)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Prepare to count sim_time
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0

    # Open logging file
    with open(os.path.join(log_dir_path, "log.txt"), "w") as log_file:
        # Simulate physics
        while simulation_app.is_running():
            log_file.write(f"{sim_time}" + "\n")
            # perform step
            sim.step()
            sim_time += sim_dt


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
2,342
Python
27.228915
107
0.673356
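Since the script above writes one simulation timestamp per line, the resulting log can be inspected afterwards with plain Python. A small companion sketch (the relative path assumes the same working directory as the script):

.. code-block:: python

    import os

    log_path = os.path.join("logs", "docker_tutorial", "log.txt")
    with open(log_path) as f:
        # one float per line; skip any trailing blank line
        times = [float(line) for line in f if line.strip()]
    print(f"steps: {len(times)}, final sim time: {times[-1]:.2f} s")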
NVIDIA-Omniverse/orbit/source/standalone/demos/markers.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates different types of markers.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/markers.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different types of markers.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR, ISAAC_ORBIT_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import quat_from_angle_axis


def define_markers() -> VisualizationMarkers:
    """Define markers with various shapes."""
    marker_cfg = VisualizationMarkersCfg(
        prim_path="/Visuals/myMarkers",
        markers={
            "frame": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
                scale=(0.5, 0.5, 0.5),
            ),
            "arrow_x": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
                scale=(1.0, 0.5, 0.5),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 1.0)),
            ),
            "cube": sim_utils.CuboidCfg(
                size=(1.0, 1.0, 1.0),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
            ),
            "sphere": sim_utils.SphereCfg(
                radius=0.5,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
            ),
            "cylinder": sim_utils.CylinderCfg(
                radius=0.5,
                height=1.0,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
            ),
            "cone": sim_utils.ConeCfg(
                radius=0.5,
                height=1.0,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 0.0)),
            ),
            "mesh": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(10.0, 10.0, 10.0),
            ),
            "mesh_recolored": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(10.0, 10.0, 10.0),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.25, 0.0)),
            ),
            "robot_mesh": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
                scale=(2.0, 2.0, 2.0),
                visual_material=sim_utils.GlassMdlCfg(glass_color=(0.0, 0.1, 0.0)),
            ),
        },
    )
    return VisualizationMarkers(marker_cfg)


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view([0.0, 18.0, 12.0], [0.0, 3.0, 0.0])

    # Spawn things into stage
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # create markers
    my_visualizer = define_markers()

    # define a grid of positions where the markers should be placed
    num_markers_per_type = 5
    grid_spacing = 2.0
    # Calculate the half-width and half-height
    half_width = (num_markers_per_type - 1) / 2.0
    half_height = (my_visualizer.num_prototypes - 1) / 2.0
    # Create the x and y ranges centered around the origin
    x_range = torch.arange(-half_width * grid_spacing, (half_width + 1) * grid_spacing, grid_spacing)
    y_range = torch.arange(-half_height * grid_spacing, (half_height + 1) * grid_spacing, grid_spacing)
    # Create the grid
    x_grid, y_grid = torch.meshgrid(x_range, y_range, indexing="ij")
    x_grid = x_grid.reshape(-1)
    y_grid = y_grid.reshape(-1)
    z_grid = torch.zeros_like(x_grid)
    # marker locations
    marker_locations = torch.stack([x_grid, y_grid, z_grid], dim=1)
    marker_indices = torch.arange(my_visualizer.num_prototypes).repeat(num_markers_per_type)

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Yaw angle
    yaw = torch.zeros_like(marker_locations[:, 0])
    # Simulate physics
    while simulation_app.is_running():
        # rotate the markers around the z-axis for visualization
        marker_orientations = quat_from_angle_axis(yaw, torch.tensor([0.0, 0.0, 1.0]))
        # visualize
        my_visualizer.visualize(marker_locations, marker_orientations, marker_indices=marker_indices)
        # roll corresponding indices to show how marker prototype can be changed
        if yaw[0].item() % (0.5 * torch.pi) < 0.01:
            marker_indices = torch.roll(marker_indices, 1)
        # perform step
        sim.step()
        # increment yaw
        yaw += 0.01


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,641
Python
34.936306
103
0.617089
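The `torch.roll` call in the record above is what cycles every marker to the next prototype. Isolated for clarity, with arbitrary sizes:

.. code-block:: python

    import torch

    num_prototypes, num_markers_per_type = 3, 4
    marker_indices = torch.arange(num_prototypes).repeat(num_markers_per_type)
    print(marker_indices)                 # tensor([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])
    # rolling by one shifts each instance to the next prototype in the list
    print(torch.roll(marker_indices, 1))  # tensor([2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1])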
NVIDIA-Omniverse/orbit/source/standalone/demos/hands.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates different dexterous hands.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/hands.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different dexterous hands.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.allegro import ALLEGRO_HAND_CFG  # isort:skip
from omni.isaac.orbit_assets.shadow_hand import SHADOW_HAND_CFG  # isort:skip


def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
    """Defines the origins of the scene."""
    # create tensor based on number of environments
    env_origins = torch.zeros(num_origins, 3)
    # create a grid of origins
    num_cols = np.floor(np.sqrt(num_origins))
    num_rows = np.ceil(num_origins / num_cols)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    env_origins[:, 2] = 0.0
    # return the origins
    return env_origins.tolist()


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a robot in it
    origins = define_origins(num_origins=2, spacing=0.5)
    # Origin 1 with Allegro Hand
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # -- Robot
    allegro = Articulation(ALLEGRO_HAND_CFG.replace(prim_path="/World/Origin1/Robot"))
    # Origin 2 with Shadow Hand
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
    # -- Robot
    shadow_hand = Articulation(SHADOW_HAND_CFG.replace(prim_path="/World/Origin2/Robot"))

    # return the scene information
    scene_entities = {
        "allegro": allegro,
        "shadow_hand": shadow_hand,
    }
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Start with hand open
    grasp_mode = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 1000 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset robots
            for index, robot in enumerate(entities.values()):
                # root state
                root_state = robot.data.default_root_state.clone()
                root_state[:, :3] += origins[index]
                robot.write_root_state_to_sim(root_state)
                # joint state
                joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
                robot.write_joint_state_to_sim(joint_pos, joint_vel)
                # reset the internal state
                robot.reset()
            print("[INFO]: Resetting robots state...")
        # toggle grasp mode
        if count % 100 == 0:
            grasp_mode = 1 - grasp_mode
        # apply default actions to the hand robots
        for robot in entities.values():
            # generate joint positions
            joint_pos_target = robot.data.soft_joint_pos_limits[..., grasp_mode]
            # apply action to the robot
            robot.set_joint_position_target(joint_pos_target)
            # write data to sim
            robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        for robot in entities.values():
            robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view(eye=[0.0, -0.5, 1.5], target=[0.0, -0.2, 0.5])
    # design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
5,446
Python
31.041176
113
0.637716
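The grasp toggle above relies on `soft_joint_pos_limits` stacking the lower and upper joint limits in its last dimension, so indexing with `grasp_mode` in {0, 1} selects fully-open or fully-closed joint targets. A sketch with dummy limit values (real values come from the hand asset):

.. code-block:: python

    import torch

    # assumed layout: (num_envs, num_joints, 2), last dim = (lower, upper) limit
    soft_joint_pos_limits = torch.tensor([[[-0.5, 1.2], [0.0, 1.6]]])
    for grasp_mode in (0, 1):
        print(f"mode {grasp_mode}:", soft_joint_pos_limits[..., grasp_mode])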
NVIDIA-Omniverse/orbit/source/standalone/demos/arms.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates different single-arm manipulators.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/arms.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different single-arm manipulators.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Pre-defined configs
##
# isort: off
from omni.isaac.orbit_assets import (
    FRANKA_PANDA_CFG,
    UR10_CFG,
    KINOVA_JACO2_N7S300_CFG,
    KINOVA_JACO2_N6S300_CFG,
    KINOVA_GEN3_N7_CFG,
    SAWYER_CFG,
)

# isort: on


def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
    """Defines the origins of the scene."""
    # create tensor based on number of environments
    env_origins = torch.zeros(num_origins, 3)
    # create a grid of origins
    num_rows = np.floor(np.sqrt(num_origins))
    num_cols = np.ceil(num_origins / num_rows)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    env_origins[:, 2] = 0.0
    # return the origins
    return env_origins.tolist()


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1", "Origin2", ..., "Origin6"
    # Each group will have a mount and a robot on top of it
    origins = define_origins(num_origins=6, spacing=2.0)

    # Origin 1 with Franka Panda
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Origin1/Table", cfg, translation=(0.55, 0.0, 1.05))
    # -- Robot
    franka_arm_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Origin1/Robot")
    franka_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
    franka_panda = Articulation(cfg=franka_arm_cfg)

    # Origin 2 with UR10
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
    # -- Table
    cfg = sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
    )
    cfg.func("/World/Origin2/Table", cfg, translation=(0.0, 0.0, 1.03))
    # -- Robot
    ur10_cfg = UR10_CFG.replace(prim_path="/World/Origin2/Robot")
    ur10_cfg.init_state.pos = (0.0, 0.0, 1.03)
    ur10 = Articulation(cfg=ur10_cfg)

    # Origin 3 with Kinova JACO2 (7-Dof) arm
    prim_utils.create_prim("/World/Origin3", "Xform", translation=origins[2])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
    cfg.func("/World/Origin3/Table", cfg, translation=(0.0, 0.0, 0.8))
    # -- Robot
    kinova_arm_cfg = KINOVA_JACO2_N7S300_CFG.replace(prim_path="/World/Origin3/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
    kinova_j2n7s300 = Articulation(cfg=kinova_arm_cfg)

    # Origin 4 with Kinova JACO2 (6-Dof) arm
    prim_utils.create_prim("/World/Origin4", "Xform", translation=origins[3])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
    cfg.func("/World/Origin4/Table", cfg, translation=(0.0, 0.0, 0.8))
    # -- Robot
    kinova_arm_cfg = KINOVA_JACO2_N6S300_CFG.replace(prim_path="/World/Origin4/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
    kinova_j2n6s300 = Articulation(cfg=kinova_arm_cfg)

    # Origin 5 with Kinova Gen3 (7-Dof) arm
    prim_utils.create_prim("/World/Origin5", "Xform", translation=origins[4])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Origin5/Table", cfg, translation=(0.55, 0.0, 1.05))
    # -- Robot
    kinova_arm_cfg = KINOVA_GEN3_N7_CFG.replace(prim_path="/World/Origin5/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
    kinova_gen3n7 = Articulation(cfg=kinova_arm_cfg)

    # Origin 6 with Sawyer
    prim_utils.create_prim("/World/Origin6", "Xform", translation=origins[5])
    # -- Table
    cfg = sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
    )
    cfg.func("/World/Origin6/Table", cfg, translation=(0.0, 0.0, 1.03))
    # -- Robot
    sawyer_arm_cfg = SAWYER_CFG.replace(prim_path="/World/Origin6/Robot")
    sawyer_arm_cfg.init_state.pos = (0.0, 0.0, 1.03)
    sawyer = Articulation(cfg=sawyer_arm_cfg)

    # return the scene information
    scene_entities = {
        "franka_panda": franka_panda,
        "ur10": ur10,
        "kinova_j2n7s300": kinova_j2n7s300,
        "kinova_j2n6s300": kinova_j2n6s300,
        "kinova_gen3n7": kinova_gen3n7,
        "sawyer": sawyer,
    }
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 200 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset the scene entities
            for index, robot in enumerate(entities.values()):
                # root state
                root_state = robot.data.default_root_state.clone()
                root_state[:, :3] += origins[index]
                robot.write_root_state_to_sim(root_state)
                # set joint positions
                joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
                robot.write_joint_state_to_sim(joint_pos, joint_vel)
                # clear internal buffers
                robot.reset()
            print("[INFO]: Resetting robots state...")
        # apply random actions to the robots
        for robot in entities.values():
            # generate random joint positions
            joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
            joint_pos_target = joint_pos_target.clamp_(
                robot.data.soft_joint_pos_limits[..., 0], robot.data.soft_joint_pos_limits[..., 1]
            )
            # apply action to the robot
            robot.set_joint_position_target(joint_pos_target)
            # write data to sim
            robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        for robot in entities.values():
            robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg()
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([3.5, 0.0, 3.2], [0.0, 0.0, 0.5])
    # design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
8,481
Python
34.940678
115
0.643438
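The grid arithmetic in `define_origins` above can be run standalone; for six origins with 2.0 m spacing it yields a centered 2 x 3 layout (two x values, three y values) on the ground plane:

.. code-block:: python

    import numpy as np
    import torch

    num_origins, spacing = 6, 2.0
    env_origins = torch.zeros(num_origins, 3)
    num_rows = np.floor(np.sqrt(num_origins))   # 2.0
    num_cols = np.ceil(num_origins / num_rows)  # 3.0
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    # center the grid around the origin in both x and y
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    print(env_origins)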
NVIDIA-Omniverse/orbit/source/standalone/demos/quadrupeds.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates different legged robots.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/quadrupeds.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different legged robots.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_B_CFG, ANYMAL_C_CFG, ANYMAL_D_CFG  # isort:skip
from omni.isaac.orbit_assets.unitree import UNITREE_A1_CFG, UNITREE_GO1_CFG, UNITREE_GO2_CFG  # isort:skip


def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
    """Defines the origins of the scene."""
    # create tensor based on number of environments
    env_origins = torch.zeros(num_origins, 3)
    # create a grid of origins
    num_cols = np.floor(np.sqrt(num_origins))
    num_rows = np.ceil(num_origins / num_cols)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    env_origins[:, 2] = 0.0
    # return the origins
    return env_origins.tolist()


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1", "Origin2", ..., "Origin6"
    # Each group will have a robot in it
    origins = define_origins(num_origins=6, spacing=1.25)

    # Origin 1 with Anymal B
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # -- Robot
    anymal_b = Articulation(ANYMAL_B_CFG.replace(prim_path="/World/Origin1/Robot"))

    # Origin 2 with Anymal C
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
    # -- Robot
    anymal_c = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Origin2/Robot"))

    # Origin 3 with Anymal D
    prim_utils.create_prim("/World/Origin3", "Xform", translation=origins[2])
    # -- Robot
    anymal_d = Articulation(ANYMAL_D_CFG.replace(prim_path="/World/Origin3/Robot"))

    # Origin 4 with Unitree A1
    prim_utils.create_prim("/World/Origin4", "Xform", translation=origins[3])
    # -- Robot
    unitree_a1 = Articulation(UNITREE_A1_CFG.replace(prim_path="/World/Origin4/Robot"))

    # Origin 5 with Unitree Go1
    prim_utils.create_prim("/World/Origin5", "Xform", translation=origins[4])
    # -- Robot
    unitree_go1 = Articulation(UNITREE_GO1_CFG.replace(prim_path="/World/Origin5/Robot"))

    # Origin 6 with Unitree Go2
    prim_utils.create_prim("/World/Origin6", "Xform", translation=origins[5])
    # -- Robot
    unitree_go2 = Articulation(UNITREE_GO2_CFG.replace(prim_path="/World/Origin6/Robot"))

    # return the scene information
    scene_entities = {
        "anymal_b": anymal_b,
        "anymal_c": anymal_c,
        "anymal_d": anymal_d,
        "unitree_a1": unitree_a1,
        "unitree_go1": unitree_go1,
        "unitree_go2": unitree_go2,
    }
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 200 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset robots
            for index, robot in enumerate(entities.values()):
                # root state
                root_state = robot.data.default_root_state.clone()
                root_state[:, :3] += origins[index]
                robot.write_root_state_to_sim(root_state)
                # joint state
                joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
                robot.write_joint_state_to_sim(joint_pos, joint_vel)
                # reset the internal state
                robot.reset()
            print("[INFO]: Resetting robots state...")
        # apply default actions to the quadrupedal robots
        for robot in entities.values():
            # generate random joint positions
            joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
            # apply action to the robot
            robot.set_joint_position_target(joint_pos_target)
            # write data to sim
            robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        for robot in entities.values():
            robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
6,360
Python
32.656084
113
0.644654
NVIDIA-Omniverse/orbit/source/standalone/demos/bipeds.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to simulate a bipedal robot.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a bipedal robot.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.cassie import CASSIE_CFG  # isort:skip


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(
        sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.005, physx=sim_utils.PhysxCfg(use_gpu=False))
    )
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])

    # Spawn things into stage
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Robots
    robot_cfg = CASSIE_CFG
    robot_cfg.spawn.func("/World/Cassie/Robot_1", robot_cfg.spawn, translation=(1.5, 0.5, 0.42))

    # create handles for the robots
    robots = Articulation(robot_cfg.replace(prim_path="/World/Cassie/Robot.*"))

    # Play the simulator
    sim.reset()

    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 200 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset dof state
            joint_pos, joint_vel = robots.data.default_joint_pos, robots.data.default_joint_vel
            robots.write_joint_state_to_sim(joint_pos, joint_vel)
            robots.write_root_pose_to_sim(robots.data.default_root_state[:, :7])
            robots.write_root_velocity_to_sim(robots.data.default_root_state[:, 7:])
            robots.reset()
            # reset command
            print(">>>>>>>> Reset!")
        # apply action to the robot
        robots.set_joint_position_target(robots.data.default_joint_pos.clone())
        robots.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        robots.update(sim_dt)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
3,056
Python
26.790909
120
0.643652
NVIDIA-Omniverse/orbit/source/standalone/demos/procedural_terrain.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates procedural terrains with flat patches.

Example usage:

.. code-block:: bash

    # Generate terrain with height color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme height

    # Generate terrain with random color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme random

    # Generate terrain with no color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme none

    # Generate terrain with curriculum
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum

    # Generate terrain with curriculum along with flat patches
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum --show_flat_patches

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates procedural terrain generation.")
parser.add_argument(
    "--color_scheme",
    type=str,
    default="none",
    choices=["height", "random", "none"],
    help="Color scheme to use for the terrain generation.",
)
parser.add_argument(
    "--use_curriculum",
    action="store_true",
    default=False,
    help="Whether to use the curriculum for the terrain generation.",
)
parser.add_argument(
    "--show_flat_patches",
    action="store_true",
    default=False,
    help="Whether to show the flat patches computed during the terrain generation.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import random
import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBase
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.terrains import FlatPatchSamplingCfg, TerrainImporter, TerrainImporterCfg

##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG  # isort:skip


def design_scene() -> tuple[dict, torch.Tensor]:
    """Designs the scene."""
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Parse terrain generation
    terrain_gen_cfg = ROUGH_TERRAINS_CFG.replace(curriculum=args_cli.use_curriculum, color_scheme=args_cli.color_scheme)

    # Add flat patch configuration
    # Note: To have separate colors for each sub-terrain type, we set the flat patch sampling configuration name
    #   to the sub-terrain name. However, this is not how it should be used in practice. The key name should be
    #   the intention of the flat patch. For instance, "source" or "target" for spawn and command related flat patches.
    if args_cli.show_flat_patches:
        for sub_terrain_name, sub_terrain_cfg in terrain_gen_cfg.sub_terrains.items():
            sub_terrain_cfg.flat_patch_sampling = {
                sub_terrain_name: FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05)
            }

    # Handler for terrains importing
    terrain_importer_cfg = TerrainImporterCfg(
        num_envs=2048,
        env_spacing=3.0,
        prim_path="/World/ground",
        max_init_terrain_level=None,
        terrain_type="generator",
        terrain_generator=terrain_gen_cfg,
        debug_vis=True,
    )
    # Remove visual material for height and random color schemes to use the default material
    if args_cli.color_scheme in ["height", "random"]:
        terrain_importer_cfg.visual_material = None
    # Create terrain importer
    terrain_importer = TerrainImporter(terrain_importer_cfg)

    # Show the flat patches computed
    if args_cli.show_flat_patches:
        # Configure the flat patches
        vis_cfg = VisualizationMarkersCfg(prim_path="/Visuals/TerrainFlatPatches", markers={})
        for name in terrain_importer.flat_patches:
            vis_cfg.markers[name] = sim_utils.CylinderCfg(
                radius=0.5,  # note: manually set to the patch radius for visualization
                height=0.1,
                visual_material=sim_utils.GlassMdlCfg(glass_color=(random.random(), random.random(), random.random())),
            )
        flat_patches_visualizer = VisualizationMarkers(vis_cfg)

        # Visualize the flat patches
        all_patch_locations = []
        all_patch_indices = []
        for i, patch_locations in enumerate(terrain_importer.flat_patches.values()):
            num_patch_locations = patch_locations.view(-1, 3).shape[0]
            # store the patch locations and indices
            all_patch_locations.append(patch_locations.view(-1, 3))
            all_patch_indices += [i] * num_patch_locations
        # combine the patch locations and indices
        flat_patches_visualizer.visualize(torch.cat(all_patch_locations), marker_indices=all_patch_indices)

    # return the scene information
    scene_entities = {"terrain": terrain_importer}
    return scene_entities, terrain_importer.env_origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, AssetBase], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_entities, scene_origins = design_scene()
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
6,249
Python
34.112359
120
0.692591
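The flat-patch wiring above keys each `FlatPatchSamplingCfg` by sub-terrain name only for per-type coloring; as the record's own note says, in practice the key should describe the patch's intent. A minimal sketch of such a mapping (assumes the orbit extensions are importable):

.. code-block:: python

    from omni.isaac.orbit.terrains import FlatPatchSamplingCfg

    # keys name the purpose of the patches, e.g. spawn locations vs. command targets
    flat_patch_sampling = {
        "source": FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05),
        "target": FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05),
    }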
NVIDIA-Omniverse/orbit/source/standalone/environments/random_agent.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run an environment with a random action agent."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Random agent for Orbit environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import parse_env_cfg


def main():
    """Random actions agent with Orbit environment."""
    # create environment configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # print info (this is vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # sample actions from -1 to 1
            actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
            # apply actions
            env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
2,276
Python
29.36
115
0.695079
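The sampling line above maps `torch.rand`'s [0, 1) output onto the [-1, 1) action box; the same affine rescaling works for any bounds:

.. code-block:: python

    import torch

    low, high = -1.0, 1.0
    # low + (high - low) * u rescales u in [0, 1) to [low, high)
    actions = low + (high - low) * torch.rand(4, 3)
    assert actions.min() >= low and actions.max() < high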
NVIDIA-Omniverse/orbit/source/standalone/environments/list_envs.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to print all the available environments in ORBIT.

The script iterates over all registered environments and stores the details in a table.
It prints the name of the environment, the entry point and the config file.

All the environments are registered in the `omni.isaac.orbit_tasks` extension. They start
with `Isaac` in their name.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
from prettytable import PrettyTable

import omni.isaac.orbit_tasks  # noqa: F401


def main():
    """Print all environments registered in `omni.isaac.orbit_tasks` extension."""
    # print all the available environments
    table = PrettyTable(["S. No.", "Task Name", "Entry Point", "Config"])
    table.title = "Available Environments in ORBIT"
    # set alignment of table columns
    table.align["Task Name"] = "l"
    table.align["Entry Point"] = "l"
    table.align["Config"] = "l"

    # count of environments
    index = 0
    # acquire all Isaac environments names
    for task_spec in gym.registry.values():
        if "Isaac" in task_spec.id:
            # add details to table
            table.add_row([index + 1, task_spec.id, task_spec.entry_point, task_spec.kwargs["env_cfg_entry_point"]])
            # increment count
            index += 1

    print(table)


if __name__ == "__main__":
    try:
        # run the main function
        main()
    except Exception as e:
        raise e
    finally:
        # close the app
        simulation_app.close()
1,827
Python
25.882353
116
0.67214
NVIDIA-Omniverse/orbit/source/standalone/environments/zero_agent.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run an environment with a zero action agent."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Zero agent for Orbit environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import parse_env_cfg


def main():
    """Zero actions agent with Orbit environment."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # print info (this is vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # compute zero actions
            actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
            # apply actions
            env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
2,247
Python
28.973333
115
0.695594
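The zero-action loop in `zero_agent.py` doubles as a template for other scripted agents; only the action computation changes. A hedged sketch of a random-action variant, assuming the same `env` and `simulation_app` objects as in the script (the uniform range is illustrative):

# inside main(), after `env = gym.make(args_cli.task, cfg=env_cfg)`:
while simulation_app.is_running():
    with torch.inference_mode():
        # sample uniform actions in [-1, 1] instead of zeros
        actions = 2.0 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1.0
        env.step(actions)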
NVIDIA-Omniverse/orbit/source/standalone/environments/teleoperation/teleop_se3_agent.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run a keyboard teleoperation with Orbit manipulation environments."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Keyboard teleoperation for Orbit environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--sensitivity", type=float, default=1.0, help="Sensitivity factor.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import carb

from omni.isaac.orbit.devices import Se3Gamepad, Se3Keyboard, Se3SpaceMouse

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import parse_env_cfg


def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor:
    """Pre-process actions for the environment."""
    # compute actions based on environment
    if "Reach" in args_cli.task:
        # note: reach is the only one that uses a different action space
        # compute actions
        return delta_pose
    else:
        # resolve gripper command
        gripper_vel = torch.zeros(delta_pose.shape[0], 1, device=delta_pose.device)
        gripper_vel[:] = -1.0 if gripper_command else 1.0
        # compute actions
        return torch.concat([delta_pose, gripper_vel], dim=1)


def main():
    """Running keyboard teleoperation with Orbit manipulation environment."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # modify configuration
    env_cfg.terminations.time_out = None

    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # check environment name (for reach, we don't allow the gripper)
    if "Reach" in args_cli.task:
        carb.log_warn(
            f"The environment '{args_cli.task}' does not support gripper control. The device command will be ignored."
        )

    # create controller
    if args_cli.device.lower() == "keyboard":
        teleop_interface = Se3Keyboard(
            pos_sensitivity=0.005 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity
        )
    elif args_cli.device.lower() == "spacemouse":
        teleop_interface = Se3SpaceMouse(
            pos_sensitivity=0.05 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity
        )
    elif args_cli.device.lower() == "gamepad":
        teleop_interface = Se3Gamepad(
            pos_sensitivity=0.1 * args_cli.sensitivity, rot_sensitivity=0.1 * args_cli.sensitivity
        )
    else:
        raise ValueError(
            f"Invalid device interface '{args_cli.device}'. Supported: 'keyboard', 'spacemouse', 'gamepad'."
        )
    # add teleoperation key for env reset
    teleop_interface.add_callback("L", env.reset)
    # print helper for keyboard
    print(teleop_interface)

    # reset environment
    env.reset()
    teleop_interface.reset()

    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # get keyboard command
            delta_pose, gripper_command = teleop_interface.advance()
            delta_pose = delta_pose.astype("float32")
            # convert to torch
            delta_pose = torch.tensor(delta_pose, device=env.unwrapped.device).repeat(env.unwrapped.num_envs, 1)
            # pre-process actions
            actions = pre_process_actions(delta_pose, gripper_command)
            # apply actions
            env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,590
Python
34.589147
118
0.682135
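The `add_callback` hook used above for the "L" reset binding accepts any zero-argument callable, which makes it handy for debug bindings. A small sketch, assuming the `teleop_interface` created in the script; the printing callback is hypothetical:

def print_interface_state():
    # illustrative debug hook bound to a spare key
    print("[INFO]: Teleop interface state:")
    print(teleop_interface)


teleop_interface.add_callback("P", print_interface_state)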
NVIDIA-Omniverse/orbit/source/standalone/environments/state_machine/lift_cube_sm.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to run an environment with a pick and lift state machine.

The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.

.. code-block:: bash

    ./orbit.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 32

"""

"""Launch Omniverse Toolkit first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Pick and lift state machine for lift environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything else."""

import gymnasium as gym
import torch
from collections.abc import Sequence

import warp as wp

from omni.isaac.orbit.assets.rigid_object.rigid_object_data import RigidObjectData

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.manipulation.lift.lift_env_cfg import LiftEnvCfg
from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg

# initialize warp
wp.init()


class GripperState:
    """States for the gripper."""

    OPEN = wp.constant(1.0)
    CLOSE = wp.constant(-1.0)


class PickSmState:
    """States for the pick state machine."""

    REST = wp.constant(0)
    APPROACH_ABOVE_OBJECT = wp.constant(1)
    APPROACH_OBJECT = wp.constant(2)
    GRASP_OBJECT = wp.constant(3)
    LIFT_OBJECT = wp.constant(4)


class PickSmWaitTime:
    """Additional wait times (in s) for states before switching."""

    REST = wp.constant(0.2)
    APPROACH_ABOVE_OBJECT = wp.constant(0.5)
    APPROACH_OBJECT = wp.constant(0.6)
    GRASP_OBJECT = wp.constant(0.3)
    LIFT_OBJECT = wp.constant(1.0)


@wp.kernel
def infer_state_machine(
    dt: wp.array(dtype=float),
    sm_state: wp.array(dtype=int),
    sm_wait_time: wp.array(dtype=float),
    ee_pose: wp.array(dtype=wp.transform),
    object_pose: wp.array(dtype=wp.transform),
    des_object_pose: wp.array(dtype=wp.transform),
    des_ee_pose: wp.array(dtype=wp.transform),
    gripper_state: wp.array(dtype=float),
    offset: wp.array(dtype=wp.transform),
):
    # retrieve thread id
    tid = wp.tid()
    # retrieve state machine state
    state = sm_state[tid]
    # decide next state
    if state == PickSmState.REST:
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.REST:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.APPROACH_ABOVE_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_ABOVE_OBJECT:
        des_ee_pose[tid] = wp.transform_multiply(offset[tid], object_pose[tid])
        gripper_state[tid] = GripperState.OPEN
        # TODO: error between current and desired ee pose below threshold
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.APPROACH_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_OBJECT:
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # TODO: error between current and desired ee pose below threshold
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.GRASP_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.GRASP_OBJECT:
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.GRASP_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.LIFT_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.LIFT_OBJECT:
        des_ee_pose[tid] = des_object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        # TODO: error between current and desired ee pose below threshold
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.LIFT_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.LIFT_OBJECT
            sm_wait_time[tid] = 0.0
    # increment wait time
    sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]


class PickAndLiftSm:
    """A simple state machine in a robot's task space to pick and lift an object.

    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the object, and outputs the desired state of the robot's
    end-effector and the gripper. The state machine is implemented as a finite state
    machine with the following states:

    1. REST: The robot is at rest.
    2. APPROACH_ABOVE_OBJECT: The robot moves above the object.
    3. APPROACH_OBJECT: The robot moves to the object.
    4. GRASP_OBJECT: The robot grasps the object.
    5. LIFT_OBJECT: The robot lifts the object to the desired pose. This is the final state.
    """

    def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"):
        """Initialize the state machine.

        Args:
            dt: The environment time step.
            num_envs: The number of environments to simulate.
            device: The device to run the state machine on.
        """
        # save parameters
        self.dt = float(dt)
        self.num_envs = num_envs
        self.device = device
        # initialize state machine
        self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
        self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
        self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
        # desired state
        self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
        self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
        # approach above object offset
        self.offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.offset[:, 2] = 0.1
        self.offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # convert to warp
        self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
        self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
        self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
        self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
        self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
        self.offset_wp = wp.from_torch(self.offset, wp.transform)

    def reset_idx(self, env_ids: Sequence[int] = None):
        """Reset the state machine."""
        if env_ids is None:
            env_ids = slice(None)
        self.sm_state[env_ids] = 0
        self.sm_wait_time[env_ids] = 0.0

    def compute(self, ee_pose: torch.Tensor, object_pose: torch.Tensor, des_object_pose: torch.Tensor):
        """Compute the desired state of the robot's end-effector and the gripper."""
        # convert all transformations from (w, x, y, z) to (x, y, z, w)
        ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        object_pose = object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        des_object_pose = des_object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        # convert to warp
        ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
        object_pose_wp = wp.from_torch(object_pose.contiguous(), wp.transform)
        des_object_pose_wp = wp.from_torch(des_object_pose.contiguous(), wp.transform)
        # run state machine
        wp.launch(
            kernel=infer_state_machine,
            dim=self.num_envs,
            inputs=[
                self.sm_dt_wp,
                self.sm_state_wp,
                self.sm_wait_time_wp,
                ee_pose_wp,
                object_pose_wp,
                des_object_pose_wp,
                self.des_ee_pose_wp,
                self.des_gripper_state_wp,
                self.offset_wp,
            ],
            device=self.device,
        )
        # convert transformations back to (w, x, y, z)
        des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
        # convert to torch
        return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)


def main():
    # parse configuration
    env_cfg: LiftEnvCfg = parse_env_cfg(
        "Isaac-Lift-Cube-Franka-IK-Abs-v0",
        use_gpu=not args_cli.cpu,
        num_envs=args_cli.num_envs,
        use_fabric=not args_cli.disable_fabric,
    )
    # create environment
    env = gym.make("Isaac-Lift-Cube-Franka-IK-Abs-v0", cfg=env_cfg)
    # reset environment at start
    env.reset()

    # create action buffers (position + quaternion)
    actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
    actions[:, 3] = 1.0
    # desired object orientation (we only do position control of object)
    desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
    desired_orientation[:, 1] = 1.0
    # create state machine
    pick_sm = PickAndLiftSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)

    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # step environment
            dones = env.step(actions)[-2]

            # observations
            # -- end-effector frame
            ee_frame_sensor = env.unwrapped.scene["ee_frame"]
            tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone()
            # -- object frame
            object_data: RigidObjectData = env.unwrapped.scene["object"].data
            object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins
            # -- target object frame
            desired_position = env.unwrapped.command_manager.get_command("object_pose")[..., :3]

            # advance state machine
            actions = pick_sm.compute(
                torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
                torch.cat([object_position, desired_orientation], dim=-1),
                torch.cat([desired_position, desired_orientation], dim=-1),
            )

            # reset state machine
            if dones.any():
                pick_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
11,404
Python
37.016667
118
0.632673
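The index permutations inside `PickAndLiftSm.compute` switch quaternions between Orbit's (w, x, y, z) layout and warp's (x, y, z, w). A standalone check that the two permutations used in the script are exact inverses (pure torch, no warp required):

import torch

pose_wxyz = torch.randn(4, 7)  # position (3) + quaternion stored as (w, x, y, z)
as_xyzw = pose_wxyz[:, [0, 1, 2, 4, 5, 6, 3]]  # layout handed to the warp kernel
recovered = as_xyzw[:, [0, 1, 2, 6, 3, 4, 5]]  # layout recovered after the kernel
assert torch.equal(recovered, pose_wxyz)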
NVIDIA-Omniverse/orbit/source/standalone/environments/state_machine/open_cabinet_sm.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to run an environment with a cabinet opening state machine.

The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.

.. code-block:: bash

    ./orbit.sh -p source/standalone/environments/state_machine/open_cabinet_sm.py --num_envs 32

"""

"""Launch Omniverse Toolkit first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Drawer opening state machine for cabinet environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything else."""

import gymnasium as gym
import torch
import traceback
from collections.abc import Sequence

import carb
import warp as wp

from omni.isaac.orbit.sensors import FrameTransformer

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg
from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg

# initialize warp
wp.init()


class GripperState:
    """States for the gripper."""

    OPEN = wp.constant(1.0)
    CLOSE = wp.constant(-1.0)


class OpenDrawerSmState:
    """States for the cabinet drawer opening state machine."""

    REST = wp.constant(0)
    APPROACH_INFRONT_HANDLE = wp.constant(1)
    APPROACH_HANDLE = wp.constant(2)
    GRASP_HANDLE = wp.constant(3)
    OPEN_DRAWER = wp.constant(4)
    RELEASE_HANDLE = wp.constant(5)


class OpenDrawerSmWaitTime:
    """Additional wait times (in s) for states before switching."""

    REST = wp.constant(0.5)
    APPROACH_INFRONT_HANDLE = wp.constant(1.25)
    APPROACH_HANDLE = wp.constant(1.0)
    GRASP_HANDLE = wp.constant(1.0)
    OPEN_DRAWER = wp.constant(3.0)
    RELEASE_HANDLE = wp.constant(0.2)


@wp.kernel
def infer_state_machine(
    dt: wp.array(dtype=float),
    sm_state: wp.array(dtype=int),
    sm_wait_time: wp.array(dtype=float),
    ee_pose: wp.array(dtype=wp.transform),
    handle_pose: wp.array(dtype=wp.transform),
    des_ee_pose: wp.array(dtype=wp.transform),
    gripper_state: wp.array(dtype=float),
    handle_approach_offset: wp.array(dtype=wp.transform),
    handle_grasp_offset: wp.array(dtype=wp.transform),
    drawer_opening_rate: wp.array(dtype=wp.transform),
):
    # retrieve thread id
    tid = wp.tid()
    # retrieve state machine state
    state = sm_state[tid]
    # decide next state
    if state == OpenDrawerSmState.REST:
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.REST:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.APPROACH_INFRONT_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.APPROACH_INFRONT_HANDLE:
        des_ee_pose[tid] = wp.transform_multiply(handle_approach_offset[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.OPEN
        # TODO: error between current and desired ee pose below threshold
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_INFRONT_HANDLE:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.APPROACH_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.APPROACH_HANDLE:
        des_ee_pose[tid] = handle_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # TODO: error between current and desired ee pose below threshold
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_HANDLE:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.GRASP_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.GRASP_HANDLE:
        des_ee_pose[tid] = wp.transform_multiply(handle_grasp_offset[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.GRASP_HANDLE:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.OPEN_DRAWER
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.OPEN_DRAWER:
        des_ee_pose[tid] = wp.transform_multiply(drawer_opening_rate[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.OPEN_DRAWER:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.RELEASE_HANDLE:
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.RELEASE_HANDLE:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
            sm_wait_time[tid] = 0.0
    # increment wait time
    sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]


class OpenDrawerSm:
    """A simple state machine in a robot's task space to open a drawer in the cabinet.

    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the object, and outputs the desired state of the robot's
    end-effector and the gripper. The state machine is implemented as a finite state
    machine with the following states:

    1. REST: The robot is at rest.
    2. APPROACH_HANDLE: The robot moves towards the handle of the drawer.
    3. GRASP_HANDLE: The robot grasps the handle of the drawer.
    4. OPEN_DRAWER: The robot opens the drawer.
    5. RELEASE_HANDLE: The robot releases the handle of the drawer. This is the final state.
    """

    def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"):
        """Initialize the state machine.

        Args:
            dt: The environment time step.
            num_envs: The number of environments to simulate.
            device: The device to run the state machine on.
        """
        # save parameters
        self.dt = float(dt)
        self.num_envs = num_envs
        self.device = device
        # initialize state machine
        self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
        self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
        self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
        # desired state
        self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
        self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
        # approach in front of the handle
        self.handle_approach_offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.handle_approach_offset[:, 0] = -0.1
        self.handle_approach_offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # handle grasp offset
        self.handle_grasp_offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.handle_grasp_offset[:, 0] = 0.025
        self.handle_grasp_offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # drawer opening rate
        self.drawer_opening_rate = torch.zeros((self.num_envs, 7), device=self.device)
        self.drawer_opening_rate[:, 0] = -0.015
        self.drawer_opening_rate[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # convert to warp
        self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
        self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
        self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
        self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
        self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
        self.handle_approach_offset_wp = wp.from_torch(self.handle_approach_offset, wp.transform)
        self.handle_grasp_offset_wp = wp.from_torch(self.handle_grasp_offset, wp.transform)
        self.drawer_opening_rate_wp = wp.from_torch(self.drawer_opening_rate, wp.transform)

    def reset_idx(self, env_ids: Sequence[int] | None = None):
        """Reset the state machine."""
        if env_ids is None:
            env_ids = slice(None)
        # reset state machine
        self.sm_state[env_ids] = 0
        self.sm_wait_time[env_ids] = 0.0

    def compute(self, ee_pose: torch.Tensor, handle_pose: torch.Tensor):
        """Compute the desired state of the robot's end-effector and the gripper."""
        # convert all transformations from (w, x, y, z) to (x, y, z, w)
        ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        handle_pose = handle_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        # convert to warp
        ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
        handle_pose_wp = wp.from_torch(handle_pose.contiguous(), wp.transform)
        # run state machine
        wp.launch(
            kernel=infer_state_machine,
            dim=self.num_envs,
            inputs=[
                self.sm_dt_wp,
                self.sm_state_wp,
                self.sm_wait_time_wp,
                ee_pose_wp,
                handle_pose_wp,
                self.des_ee_pose_wp,
                self.des_gripper_state_wp,
                self.handle_approach_offset_wp,
                self.handle_grasp_offset_wp,
                self.drawer_opening_rate_wp,
            ],
            device=self.device,
        )
        # convert transformations back to (w, x, y, z)
        des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
        # convert to torch
        return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)


def main():
    # parse configuration
    env_cfg: CabinetEnvCfg = parse_env_cfg(
        "Isaac-Open-Drawer-Franka-IK-Abs-v0",
        use_gpu=not args_cli.cpu,
        num_envs=args_cli.num_envs,
        use_fabric=not args_cli.disable_fabric,
    )
    # create environment
    env = gym.make("Isaac-Open-Drawer-Franka-IK-Abs-v0", cfg=env_cfg)
    # reset environment at start
    env.reset()

    # create action buffers (position + quaternion)
    actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
    actions[:, 3] = 1.0
    # desired object orientation (we only do position control of object)
    desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
    desired_orientation[:, 1] = 1.0
    # create state machine
    open_sm = OpenDrawerSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)

    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # step environment
            dones = env.step(actions)[-2]

            # observations
            # -- end-effector frame
            ee_frame_tf: FrameTransformer = env.unwrapped.scene["ee_frame"]
            tcp_rest_position = ee_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            tcp_rest_orientation = ee_frame_tf.data.target_quat_w[..., 0, :].clone()
            # -- handle frame
            cabinet_frame_tf: FrameTransformer = env.unwrapped.scene["cabinet_frame"]
            cabinet_position = cabinet_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            cabinet_orientation = cabinet_frame_tf.data.target_quat_w[..., 0, :].clone()

            # advance state machine
            actions = open_sm.compute(
                torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
                torch.cat([cabinet_position, cabinet_orientation], dim=-1),
            )

            # reset state machine
            if dones.any():
                open_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))

    # close the environment
    env.close()


if __name__ == "__main__":
    try:
        # run the main execution
        main()
    except Exception as err:
        carb.log_error(err)
        carb.log_error(traceback.format_exc())
        raise
    finally:
        # close sim app
        simulation_app.close()
12,935
Python
38.559633
118
0.639351
NVIDIA-Omniverse/orbit/source/standalone/workflows/skrl/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to play a checkpoint of an RL agent from skrl.

Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from skrl.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
import torch

from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper, process_skrl_cfg


def main():
    """Play with skrl agent."""
    # parse env configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point")

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for skrl
    env = SkrlVecEnvWrapper(env)  # same as: `wrap_env(env, wrapper="isaac-orbit")`

    # instantiate models using skrl model instantiator utility
    # https://skrl.readthedocs.io/en/latest/modules/skrl.utils.model_instantiators.html
    models = {}
    # non-shared models
    if experiment_cfg["models"]["separate"]:
        models["policy"] = gaussian_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            **process_skrl_cfg(experiment_cfg["models"]["policy"]),
        )
        models["value"] = deterministic_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            **process_skrl_cfg(experiment_cfg["models"]["value"]),
        )
    # shared models
    else:
        models["policy"] = shared_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            structure=None,
            roles=["policy", "value"],
            parameters=[
                process_skrl_cfg(experiment_cfg["models"]["policy"]),
                process_skrl_cfg(experiment_cfg["models"]["value"]),
            ],
        )
        models["value"] = models["policy"]

    # configure and instantiate PPO agent
    # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
    agent_cfg = PPO_DEFAULT_CONFIG.copy()
    experiment_cfg["agent"]["rewards_shaper"] = None  # avoid 'dictionary changed size during iteration'
    agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"]))
    agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device})
    agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device})
    agent_cfg["experiment"]["write_interval"] = 0  # don't log to Tensorboard
    agent_cfg["experiment"]["checkpoint_interval"] = 0  # don't generate checkpoints

    agent = PPO(
        models=models,
        memory=None,  # memory is optional during evaluation
        cfg=agent_cfg,
        observation_space=env.observation_space,
        action_space=env.action_space,
        device=env.device,
    )

    # specify directory for logging experiments (load checkpoint)
    log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    # get checkpoint path
    if args_cli.checkpoint:
        resume_path = os.path.abspath(args_cli.checkpoint)
    else:
        resume_path = get_checkpoint_path(log_root_path, other_dirs=["checkpoints"])
    print(f"[INFO] Loading model checkpoint from: {resume_path}")

    # initialize agent
    agent.init()
    agent.load(resume_path)
    # set agent to evaluation mode
    agent.set_running_mode("eval")

    # reset environment
    obs, _ = env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = agent.act(obs, timestep=0, timesteps=0)[0]
            # env stepping
            obs, _, _, _, _ = env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,657
Python
35.269231
115
0.668022
NVIDIA-Omniverse/orbit/source/standalone/workflows/skrl/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to train RL agent with skrl.

Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with skrl.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
from datetime import datetime

from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory
from skrl.utils import set_seed
from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model

from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlSequentialLogTrainer, SkrlVecEnvWrapper, process_skrl_cfg


def main():
    """Train with skrl agent."""
    # read the seed from command line
    args_cli_seed = args_cli.seed

    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point")

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    if experiment_cfg["agent"]["experiment"]["experiment_name"]:
        log_dir += f'_{experiment_cfg["agent"]["experiment"]["experiment_name"]}'
    # set directory into agent config
    experiment_cfg["agent"]["experiment"]["directory"] = log_root_path
    experiment_cfg["agent"]["experiment"]["experiment_name"] = log_dir
    # update log_dir
    log_dir = os.path.join(log_root_path, log_dir)
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), experiment_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), experiment_cfg)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for skrl
    env = SkrlVecEnvWrapper(env)  # same as: `wrap_env(env, wrapper="isaac-orbit")`

    # set seed for the experiment (override from command line)
    set_seed(args_cli_seed if args_cli_seed is not None else experiment_cfg["seed"])

    # instantiate models using skrl model instantiator utility
    # https://skrl.readthedocs.io/en/latest/modules/skrl.utils.model_instantiators.html
    models = {}
    # non-shared models
    if experiment_cfg["models"]["separate"]:
        models["policy"] = gaussian_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            **process_skrl_cfg(experiment_cfg["models"]["policy"]),
        )
        models["value"] = deterministic_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            **process_skrl_cfg(experiment_cfg["models"]["value"]),
        )
    # shared models
    else:
        models["policy"] = shared_model(
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            structure=None,
            roles=["policy", "value"],
            parameters=[
                process_skrl_cfg(experiment_cfg["models"]["policy"]),
                process_skrl_cfg(experiment_cfg["models"]["value"]),
            ],
        )
        models["value"] = models["policy"]

    # instantiate a RandomMemory as rollout buffer (any memory can be used for this)
    # https://skrl.readthedocs.io/en/latest/modules/skrl.memories.random.html
    memory_size = experiment_cfg["agent"]["rollouts"]  # memory_size is the agent's number of rollouts
    memory = RandomMemory(memory_size=memory_size, num_envs=env.num_envs, device=env.device)

    # configure and instantiate PPO agent
    # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html
    agent_cfg = PPO_DEFAULT_CONFIG.copy()
    experiment_cfg["agent"]["rewards_shaper"] = None  # avoid 'dictionary changed size during iteration'
    agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"]))
    agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device})
    agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device})

    agent = PPO(
        models=models,
        memory=memory,
        cfg=agent_cfg,
        observation_space=env.observation_space,
        action_space=env.action_space,
        device=env.device,
    )

    # configure and instantiate a custom RL trainer for logging episode events
    # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.base_class.html
    trainer_cfg = experiment_cfg["trainer"]
    trainer = SkrlSequentialLogTrainer(cfg=trainer_cfg, env=env, agents=agent)
    # train the agent
    trainer.train()

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,471
Python
39.608695
117
0.680498
NVIDIA-Omniverse/orbit/source/standalone/workflows/rsl_rl/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RSL-RL."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
import torch

from rsl_rl.runners import OnPolicyRunner

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlVecEnvWrapper,
    export_policy_as_onnx,
)


def main():
    """Play with RSL-RL agent."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
    print(f"[INFO]: Loading model checkpoint from: {resume_path}")

    # load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    ppo_runner.load(resume_path)

    # obtain the trained policy for inference
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)

    # export policy to onnx
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_onnx(ppo_runner.alg.actor_critic, export_model_dir, filename="policy.onnx")

    # reset environment
    obs, _ = env.get_observations()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, _, _ = env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
3,568
Python
32.046296
115
0.702354
NVIDIA-Omniverse/orbit/source/standalone/workflows/rsl_rl/cli_args.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import argparse
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg


def add_rsl_rl_args(parser: argparse.ArgumentParser):
    """Add RSL-RL arguments to the parser.

    Args:
        parser: The parser to add the arguments to.
    """
    # create a new argument group
    arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
    # -- experiment arguments
    arg_group.add_argument(
        "--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
    )
    arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
    # -- load arguments
    arg_group.add_argument("--resume", type=bool, default=None, help="Whether to resume from a checkpoint.")
    arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
    arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
    # -- logger arguments
    arg_group.add_argument(
        "--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
    )
    arg_group.add_argument(
        "--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
    )


def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
    """Parse configuration for RSL-RL agent based on inputs.

    Args:
        task_name: The name of the environment.
        args_cli: The command line arguments.

    Returns:
        The parsed configuration for RSL-RL agent based on inputs.
    """
    from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry

    # load the default configuration
    rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")

    # override the default configuration with CLI arguments
    if args_cli.seed is not None:
        rslrl_cfg.seed = args_cli.seed
    if args_cli.resume is not None:
        rslrl_cfg.resume = args_cli.resume
    if args_cli.load_run is not None:
        rslrl_cfg.load_run = args_cli.load_run
    if args_cli.checkpoint is not None:
        rslrl_cfg.load_checkpoint = args_cli.checkpoint
    if args_cli.run_name is not None:
        rslrl_cfg.run_name = args_cli.run_name
    if args_cli.logger is not None:
        rslrl_cfg.logger = args_cli.logger
    # set the project name for wandb and neptune
    if rslrl_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
        rslrl_cfg.wandb_project = args_cli.log_project_name
        rslrl_cfg.neptune_project = args_cli.log_project_name

    return rslrl_cfg
2,981
Python
38.759999
117
0.688695
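Taken together, `add_rsl_rl_args` and `parse_rsl_rl_cfg` form the CLI surface that the RSL-RL `train.py` and `play.py` scripts rely on. A minimal usage sketch, assuming a task registered with an `rsl_rl_cfg_entry_point`; the `--task` and `--seed` arguments are added by the caller, as in the workflow scripts:

import argparse

import cli_args  # the module above

parser = argparse.ArgumentParser(description="Sketch: wiring up RSL-RL CLI arguments.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
cli_args.add_rsl_rl_args(parser)
args_cli = parser.parse_args()

# resolve the registered config and apply the CLI overrides
agent_cfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)
print(agent_cfg.experiment_name)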
NVIDIA-Omniverse/orbit/source/standalone/workflows/rsl_rl/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RSL-RL."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
import torch
from datetime import datetime

from rsl_rl.runners import OnPolicyRunner

from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False


def main():
    """Train with RSL-RL agent."""
    # parse configuration
    env_cfg: RLTaskEnvCfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)

    # create runner from rsl-rl
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # save resume path before creating a new log_dir
    if agent_cfg.resume:
        # get path to previous checkpoint
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)

    # set seed of the environment
    env.seed(agent_cfg.seed)

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)

    # run training
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,002
Python
36.616541
117
0.70052
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run a trained policy from robomimic."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play policy trained using robomimic for Orbit environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Pytorch model checkpoint to load.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import robomimic  # noqa: F401
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import parse_env_cfg


def main():
    """Run a trained policy from robomimic with Orbit environment."""
    # parse configuration
    env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=1, use_fabric=not args_cli.disable_fabric)
    # we want to have the terms in the observations returned as a dictionary
    # rather than a concatenated tensor
    env_cfg.observations.policy.concatenate_terms = False

    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # acquire device
    device = TorchUtils.get_torch_device(try_to_use_cuda=True)
    # restore policy
    policy, _ = FileUtils.policy_from_checkpoint(ckpt_path=args_cli.checkpoint, device=device, verbose=True)

    # reset environment
    obs_dict, _ = env.reset()
    # robomimic only cares about policy observations
    obs = obs_dict["policy"]
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # compute actions
            actions = policy(obs)
            actions = torch.from_numpy(actions).to(device=device).view(1, env.action_space.shape[1])
            # apply actions
            obs_dict = env.step(actions)[0]
            # robomimic only cares about policy observations
            obs = obs_dict["policy"]

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
2,845
Python
31.340909
120
0.702988
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/collect_demonstrations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to collect demonstrations with Orbit environments."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Collect demonstrations for Orbit environments.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment")
parser.add_argument("--num_demos", type=int, default=1, help="Number of episodes to store in the dataset.")
parser.add_argument("--filename", type=str, default="hdf_dataset", help="Basename of output file.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch the simulator
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import contextlib
import gymnasium as gym
import os
import torch

from omni.isaac.orbit.devices import Se3Keyboard, Se3SpaceMouse
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.manipulation.lift import mdp
from omni.isaac.orbit_tasks.utils.data_collector import RobomimicDataCollector
from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg


def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor:
    """Pre-process actions for the environment."""
    # compute actions based on environment
    if "Reach" in args_cli.task:
        # note: reach is the only one that uses a different action space
        # compute actions
        return delta_pose
    else:
        # resolve gripper command
        gripper_vel = torch.zeros((delta_pose.shape[0], 1), dtype=torch.float, device=delta_pose.device)
        gripper_vel[:] = -1 if gripper_command else 1
        # compute actions
        return torch.concat([delta_pose, gripper_vel], dim=1)


def main():
    """Collect demonstrations from the environment using teleop interfaces."""
    # check that the task is supported
    assert (
        args_cli.task == "Isaac-Lift-Cube-Franka-IK-Rel-v0"
    ), "Only 'Isaac-Lift-Cube-Franka-IK-Rel-v0' is supported currently."

    # parse configuration
    env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
    # modify configuration such that the environment runs indefinitely
    # until goal is reached
    env_cfg.terminations.time_out = None
    # set the resampling time range to large number to avoid resampling
    env_cfg.commands.object_pose.resampling_time_range = (1.0e9, 1.0e9)
    # we want to have the terms in the observations returned as a dictionary
    # rather than a concatenated tensor
    env_cfg.observations.policy.concatenate_terms = False
    # add termination condition for reaching the goal otherwise the environment won't reset
    env_cfg.terminations.object_reached_goal = DoneTerm(func=mdp.object_reached_goal)

    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # create controller
    if args_cli.device.lower() == "keyboard":
        teleop_interface = Se3Keyboard(pos_sensitivity=0.04, rot_sensitivity=0.08)
    elif args_cli.device.lower() == "spacemouse":
        teleop_interface = Se3SpaceMouse(pos_sensitivity=0.05, rot_sensitivity=0.005)
    else:
        raise ValueError(f"Invalid device interface '{args_cli.device}'. Supported: 'keyboard', 'spacemouse'.")
    # add teleoperation key for env reset
    teleop_interface.add_callback("L", env.reset)
    # print helper
    print(teleop_interface)

    # specify directory for logging experiments
    log_dir = os.path.join("./logs/robomimic", args_cli.task)
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)

    # create data-collector
    collector_interface = RobomimicDataCollector(
        env_name=args_cli.task,
        directory_path=log_dir,
        filename=args_cli.filename,
        num_demos=args_cli.num_demos,
        flush_freq=env.num_envs,
        env_config={"device": args_cli.device},
    )

    # reset environment
    obs_dict, _ = env.reset()
    # reset interfaces
    teleop_interface.reset()
    collector_interface.reset()

    # simulate environment -- run everything in inference mode
    # note: combining the managers with `and` would only enter the right-hand one,
    # so they are combined with a comma to actually suppress KeyboardInterrupt
    with contextlib.suppress(KeyboardInterrupt), torch.inference_mode():
        while not collector_interface.is_stopped():
            # get keyboard command
            delta_pose, gripper_command = teleop_interface.advance()
            # convert to torch
            delta_pose = torch.tensor(delta_pose, dtype=torch.float, device=env.device).repeat(env.num_envs, 1)
            # compute actions based on environment
            actions = pre_process_actions(delta_pose, gripper_command)

            # TODO: Deal with the case when reset is triggered by teleoperation device.
            #   The observations need to be recollected.
            # store signals before stepping
            # -- obs
            for key, value in obs_dict["policy"].items():
                collector_interface.add(f"obs/{key}", value)
            # -- actions
            collector_interface.add("actions", actions)
            # perform action on environment
            obs_dict, rewards, terminated, truncated, info = env.step(actions)
            dones = terminated | truncated
            # check that simulation is stopped or not
            if env.unwrapped.sim.is_stopped():
                break
            # robomimic only cares about policy observations
            # store signals from the environment
            # -- next_obs
            for key, value in obs_dict["policy"].items():
                collector_interface.add(f"next_obs/{key}", value)
            # -- rewards
            collector_interface.add("rewards", rewards)
            # -- dones
            collector_interface.add("dones", dones)
            # -- is success label
            collector_interface.add("success", env.termination_manager.get_term("object_reached_goal"))
            # flush data from collector for successful environments
            reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1)
            collector_interface.flush(reset_env_ids)
            # check if enough data is collected
            if collector_interface.is_stopped():
                break

    # close the simulator
    collector_interface.close()
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,116
Python
38.320442
111
0.676504
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# MIT License
#
# Copyright (c) 2021 Stanford Vision and Learning Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
The main entry point for training policies from pre-collected data.

Args:
    algo: name of the algorithm to run.
    task: name of the environment.
    name: if provided, override the experiment name defined in the config
    dataset: if provided, override the dataset path defined in the config

This file has been modified from the original version in the following ways:

* Added import of AppLauncher from omni.isaac.orbit.app to resolve the configuration to load for training.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import argparse
import gymnasium as gym
import json
import numpy as np
import os
import sys
import time
import torch
import traceback
from collections import OrderedDict
from torch.utils.data import DataLoader

import psutil
import robomimic.utils.env_utils as EnvUtils
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.obs_utils as ObsUtils
import robomimic.utils.torch_utils as TorchUtils
import robomimic.utils.train_utils as TrainUtils
from robomimic.algo import RolloutPolicy, algo_factory
from robomimic.config import config_factory
from robomimic.utils.log_utils import DataLogger, PrintLogger

# Needed so that environment is registered
import omni.isaac.orbit_tasks  # noqa: F401


def train(config, device):
    """Train a model using the algorithm."""
    # first set seeds
    np.random.seed(config.train.seed)
    torch.manual_seed(config.train.seed)

    print("\n============= New Training Run with Config =============")
    print(config)
    print("")
    log_dir, ckpt_dir, video_dir = TrainUtils.get_exp_dir(config)
    print(f">>> Saving logs into directory: {log_dir}")
    print(f">>> Saving checkpoints into directory: {ckpt_dir}")
    print(f">>> Saving videos into directory: {video_dir}")

    if config.experiment.logging.terminal_output_to_txt:
        # log stdout and stderr to a text file
        logger = PrintLogger(os.path.join(log_dir, "log.txt"))
        sys.stdout = logger
        sys.stderr = logger

    # read config to set up metadata for observation modalities (e.g. detecting rgb observations)
    ObsUtils.initialize_obs_utils_with_config(config)

    # make sure the dataset exists
    dataset_path = os.path.expanduser(config.train.data)
    if not os.path.exists(dataset_path):
        raise FileNotFoundError(f"Dataset at provided path {dataset_path} not found!")

    # load basic metadata from training file
    print("\n============= Loaded Environment Metadata =============")
    env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=config.train.data)
    shape_meta = FileUtils.get_shape_metadata_from_dataset(
        dataset_path=config.train.data, all_obs_keys=config.all_obs_keys, verbose=True
    )

    if config.experiment.env is not None:
        env_meta["env_name"] = config.experiment.env
        print("=" * 30 + "\n" + "Replacing Env to {}\n".format(env_meta["env_name"]) + "=" * 30)

    # create environment
    envs = OrderedDict()
    if config.experiment.rollout.enabled:
        # create environments for validation runs
        env_names = [env_meta["env_name"]]

        if config.experiment.additional_envs is not None:
            for name in config.experiment.additional_envs:
                env_names.append(name)

        for env_name in env_names:
            env = EnvUtils.create_env_from_metadata(
                env_meta=env_meta,
                env_name=env_name,
                render=False,
                render_offscreen=config.experiment.render_video,
                use_image_obs=shape_meta["use_images"],
            )
            envs[env.name] = env
            print(envs[env.name])

    print("")

    # setup for a new training run
    data_logger = DataLogger(log_dir, config=config, log_tb=config.experiment.logging.log_tb)
    model = algo_factory(
        algo_name=config.algo_name,
        config=config,
        obs_key_shapes=shape_meta["all_shapes"],
        ac_dim=shape_meta["ac_dim"],
        device=device,
    )

    # save the config as a json file
    with open(os.path.join(log_dir, "..", "config.json"), "w") as outfile:
        json.dump(config, outfile, indent=4)

    print("\n============= Model Summary =============")
    print(model)  # print model summary
    print("")

    # load training data
    trainset, validset = TrainUtils.load_data_for_training(config, obs_keys=shape_meta["all_obs_keys"])
    train_sampler = trainset.get_dataset_sampler()
    print("\n============= Training Dataset =============")
    print(trainset)
    print("")

    # maybe retrieve statistics for normalizing observations
    obs_normalization_stats = None
    if config.train.hdf5_normalize_obs:
        obs_normalization_stats = trainset.get_obs_normalization_stats()

    # initialize data loaders
    train_loader = DataLoader(
        dataset=trainset,
        sampler=train_sampler,
        batch_size=config.train.batch_size,
        shuffle=(train_sampler is None),
        num_workers=config.train.num_data_workers,
        drop_last=True,
    )

    if config.experiment.validate:
        # cap num workers for validation dataset at 1
        num_workers = min(config.train.num_data_workers, 1)
        valid_sampler = validset.get_dataset_sampler()
        valid_loader = DataLoader(
            dataset=validset,
            sampler=valid_sampler,
            batch_size=config.train.batch_size,
            shuffle=(valid_sampler is None),
            num_workers=num_workers,
            drop_last=True,
        )
    else:
        valid_loader = None

    # main training loop
    best_valid_loss = None
    best_return = {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None
    best_success_rate = {k: -1.0 for k in envs} if config.experiment.rollout.enabled else None
    last_ckpt_time = time.time()

    # number of learning steps per epoch (defaults to a full dataset pass)
    train_num_steps = config.experiment.epoch_every_n_steps
    valid_num_steps = config.experiment.validation_epoch_every_n_steps

    for epoch in range(1, config.train.num_epochs + 1):  # epoch numbers start at 1
        step_log = TrainUtils.run_epoch(model=model, data_loader=train_loader, epoch=epoch, num_steps=train_num_steps)
        model.on_epoch_end(epoch)

        # setup checkpoint path
        epoch_ckpt_name = f"model_epoch_{epoch}"

        # check for recurring checkpoint saving conditions
        should_save_ckpt = False
        if config.experiment.save.enabled:
            time_check = (config.experiment.save.every_n_seconds is not None) and (
                time.time() - last_ckpt_time > config.experiment.save.every_n_seconds
            )
            epoch_check = (
                (config.experiment.save.every_n_epochs is not None)
                and (epoch > 0)
                and (epoch % config.experiment.save.every_n_epochs == 0)
            )
            epoch_list_check = epoch in config.experiment.save.epochs
            should_save_ckpt = time_check or epoch_check or epoch_list_check
        ckpt_reason = None
        if should_save_ckpt:
            last_ckpt_time = time.time()
            ckpt_reason = "time"

        print(f"Train Epoch {epoch}")
        print(json.dumps(step_log, sort_keys=True, indent=4))
        for k, v in step_log.items():
            if k.startswith("Time_"):
                data_logger.record(f"Timing_Stats/Train_{k[5:]}", v, epoch)
            else:
                data_logger.record(f"Train/{k}", v, epoch)

        # Evaluate the model on validation set
        if config.experiment.validate:
            with torch.no_grad():
                step_log = TrainUtils.run_epoch(
                    model=model, data_loader=valid_loader, epoch=epoch, validate=True, num_steps=valid_num_steps
                )
            for k, v in step_log.items():
                if k.startswith("Time_"):
                    data_logger.record(f"Timing_Stats/Valid_{k[5:]}", v, epoch)
                else:
                    data_logger.record(f"Valid/{k}", v, epoch)

            print(f"Validation Epoch {epoch}")
            print(json.dumps(step_log, sort_keys=True, indent=4))

            # save checkpoint if achieve new best validation loss
            valid_check = "Loss" in step_log
            if valid_check and (best_valid_loss is None or (step_log["Loss"] <= best_valid_loss)):
                best_valid_loss = step_log["Loss"]
                if config.experiment.save.enabled and config.experiment.save.on_best_validation:
                    epoch_ckpt_name += f"_best_validation_{best_valid_loss}"
                    should_save_ckpt = True
                    ckpt_reason = "valid" if ckpt_reason is None else ckpt_reason

        # Evaluate the model by running rollouts
        # do rollouts at fixed rate or if it's time to save a new ckpt
        video_paths = None
        rollout_check = (epoch % config.experiment.rollout.rate == 0) or (should_save_ckpt and ckpt_reason == "time")
        if config.experiment.rollout.enabled and (epoch > config.experiment.rollout.warmstart) and rollout_check:
            # wrap model as a RolloutPolicy to prepare for rollouts
            rollout_model = RolloutPolicy(model, obs_normalization_stats=obs_normalization_stats)

            num_episodes = config.experiment.rollout.n
            all_rollout_logs, video_paths = TrainUtils.rollout_with_stats(
                policy=rollout_model,
                envs=envs,
                horizon=config.experiment.rollout.horizon,
                use_goals=config.use_goals,
                num_episodes=num_episodes,
                render=False,
                video_dir=video_dir if config.experiment.render_video else None,
                epoch=epoch,
                video_skip=config.experiment.get("video_skip", 5),
                terminate_on_success=config.experiment.rollout.terminate_on_success,
            )

            # summarize results from rollouts to tensorboard and terminal
            for env_name in all_rollout_logs:
                rollout_logs = all_rollout_logs[env_name]
                for k, v in rollout_logs.items():
                    if k.startswith("Time_"):
                        data_logger.record(f"Timing_Stats/Rollout_{env_name}_{k[5:]}", v, epoch)
                    else:
                        data_logger.record(f"Rollout/{k}/{env_name}", v, epoch, log_stats=True)

                print("\nEpoch {} Rollouts took {}s (avg) with results:".format(epoch, rollout_logs["time"]))
                print(f"Env: {env_name}")
                print(json.dumps(rollout_logs, sort_keys=True, indent=4))

            # checkpoint and video saving logic
            updated_stats = TrainUtils.should_save_from_rollout_logs(
                all_rollout_logs=all_rollout_logs,
                best_return=best_return,
                best_success_rate=best_success_rate,
                epoch_ckpt_name=epoch_ckpt_name,
                save_on_best_rollout_return=config.experiment.save.on_best_rollout_return,
                save_on_best_rollout_success_rate=config.experiment.save.on_best_rollout_success_rate,
            )
            best_return = updated_stats["best_return"]
            best_success_rate = updated_stats["best_success_rate"]
            epoch_ckpt_name = updated_stats["epoch_ckpt_name"]
            should_save_ckpt = (
                config.experiment.save.enabled and updated_stats["should_save_ckpt"]
            ) or should_save_ckpt
            if updated_stats["ckpt_reason"] is not None:
                ckpt_reason = updated_stats["ckpt_reason"]

        # Only keep saved videos if the ckpt should be saved (but not because of validation score)
        should_save_video = (should_save_ckpt and (ckpt_reason != "valid")) or config.experiment.keep_all_videos
        if video_paths is not None and not should_save_video:
            for env_name in video_paths:
                os.remove(video_paths[env_name])

        # Save model checkpoints based on conditions (success rate, validation loss, etc)
        if should_save_ckpt:
            TrainUtils.save_model(
                model=model,
                config=config,
                env_meta=env_meta,
                shape_meta=shape_meta,
                ckpt_path=os.path.join(ckpt_dir, epoch_ckpt_name + ".pth"),
                obs_normalization_stats=obs_normalization_stats,
            )

        # Finally, log memory usage in MB
        process = psutil.Process(os.getpid())
        mem_usage = int(process.memory_info().rss / 1000000)
        data_logger.record("System/RAM Usage (MB)", mem_usage, epoch)
        print(f"\nEpoch {epoch} Memory Usage: {mem_usage} MB\n")

    # terminate logging
    data_logger.close()


def main(args):
    """Train a model on a task using a specified algorithm."""
    # load config
    if args.task is not None:
        # obtain the configuration entry point
        cfg_entry_point_key = f"robomimic_{args.algo}_cfg_entry_point"
        print(f"Loading configuration for task: {args.task}")
        cfg_entry_point_file = gym.spec(args.task).kwargs.pop(cfg_entry_point_key)
        # check if entry point exists
        if cfg_entry_point_file is None:
            raise ValueError(
                f"Could not find configuration for the environment: '{args.task}'."
                f" Please check that the gym registry has the entry point: '{cfg_entry_point_key}'."
            )
        # load config from json file
        with open(cfg_entry_point_file) as f:
            ext_cfg = json.load(f)
        config = config_factory(ext_cfg["algo_name"])
        # update config with external json - this will throw errors if
        # the external config has keys not present in the base algo config
        with config.values_unlocked():
            config.update(ext_cfg)
    else:
        raise ValueError("Please provide a task name through CLI arguments.")

    if args.dataset is not None:
        config.train.data = args.dataset

    if args.name is not None:
        config.experiment.name = args.name

    # change location of experiment directory
    config.train.output_dir = os.path.abspath(os.path.join("./logs/robomimic", args.task))
    # get torch device
    device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda)

    config.lock()

    # catch error during training and print it
    res_str = "finished run successfully!"
    try:
        train(config, device=device)
    except Exception as e:
        res_str = f"run failed with error:\n{e}\n\n{traceback.format_exc()}"
    print(res_str)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Experiment Name (for tensorboard, saving models, etc.)
    parser.add_argument(
        "--name",
        type=str,
        default=None,
        help="(optional) if provided, override the experiment name defined in the config",
    )

    # Dataset path, to override the one in the config
    parser.add_argument(
        "--dataset",
        type=str,
        default=None,
        help="(optional) if provided, override the dataset path defined in the config",
    )

    parser.add_argument("--task", type=str, default=None, help="Name of the task.")
    parser.add_argument("--algo", type=str, default=None, help="Name of the algorithm.")

    args = parser.parse_args()
    # run training
    main(args)
    # close sim app
    simulation_app.close()
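
# A minimal sketch of the config-override flow used in main() above, with a toy
# external config; "bc" is used here purely as an illustrative algorithm name.
from robomimic.config import config_factory

ext_cfg = {"algo_name": "bc", "experiment": {"name": "my_run"}}
config = config_factory(ext_cfg["algo_name"])
# keys unknown to the base "bc" config would raise an error here
with config.values_unlocked():
    config.update(ext_cfg)
config.lock()  # freeze the config before training, as main() does
print(config.experiment.name)  # -> "my_run"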
16,901
Python
38.957447
118
0.633809
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/episode_merging.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Tool to merge multiple episodes with single trajectory into one episode with multiple trajectories."""

from __future__ import annotations

import argparse
import h5py
import json
import os

if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(description="Merge multiple episodes with single trajectory into one episode.")
    parser.add_argument(
        "--dir", type=str, default=None, help="Path to directory that contains all single episode hdf5 files"
    )
    parser.add_argument("--task", type=str, default=None, help="Name of the task.")
    parser.add_argument("--out", type=str, default="merged_dataset.hdf5", help="output hdf5 file")
    args_cli = parser.parse_args()

    # read arguments
    parent_dir = args_cli.dir
    merged_dataset_name = args_cli.out
    task_name = args_cli.task
    # check valid task name
    if task_name is None:
        raise ValueError("Please specify a valid task name.")

    # get hdf5 entries from specified directory
    entries = [i for i in os.listdir(parent_dir) if i.endswith(".hdf5")]

    # create new hdf5 file for merging episodes
    # note: join the paths explicitly so a missing trailing slash on --dir does not break the output path
    fp = h5py.File(os.path.join(parent_dir, merged_dataset_name), "a")

    # initiate data group
    f_grp = fp.create_group("data")
    f_grp.attrs["num_samples"] = 0

    # merge all episodes
    for count, entry in enumerate(entries):
        fc = h5py.File(os.path.join(parent_dir, entry), "r")

        # find total number of samples in all demos
        f_grp.attrs["num_samples"] = f_grp.attrs["num_samples"] + fc["data"]["demo_0"].attrs["num_samples"]

        fc.copy("data/demo_0", fp["data"], "demo_" + str(count))

    # This is needed to run env in robomimic
    fp["data"].attrs["env_args"] = json.dumps({"env_name": task_name, "type": 2, "env_kwargs": {}})
    fp.close()

    print("merged")
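
# A quick check of the merged file layout (a minimal sketch; "merged_dataset.hdf5"
# is the script's default output name and is assumed to live in the --dir folder):
import h5py

with h5py.File("merged_dataset.hdf5", "r") as f:
    print(f["data"].attrs["num_samples"])  # total samples across all merged demos
    print(sorted(f["data"].keys()))        # ["demo_0", "demo_1", ...]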
1,934
Python
32.362068
116
0.661324
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/inspect_demonstrations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Tool to check structure of hdf5 files."""

from __future__ import annotations

import argparse
import h5py


def check_group(f, num: int):
    """Print the data from different keys in stored dictionary."""
    # print name of the group first
    for subs in f:
        if isinstance(subs, str):
            print("\t" * num, subs, ":", type(f[subs]))
            check_group(f[subs], num + 1)
    # print attributes of the group
    print("\t" * num, "attributes", ":")
    for attr in f.attrs:
        print("\t" * (num + 1), attr, ":", type(f.attrs[attr]), ":", f.attrs[attr])


if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(description="Check structure of hdf5 file.")
    parser.add_argument("file", type=str, default=None, help="The path to HDF5 file to analyze.")
    args_cli = parser.parse_args()

    # open specified file
    with h5py.File(args_cli.file, "r") as f:
        # print name of the file first
        print(f)
        # print contents of file
        check_group(f["data"], 1)
1,166
Python
28.923076
97
0.614923
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/split_train_val.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# MIT License
#
# Copyright (c) 2021 Stanford Vision and Learning Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

"""
Script for splitting a dataset hdf5 file into training and validation trajectories.

Args:
    dataset: path to hdf5 dataset

    filter_key: if provided, split the subset of trajectories in the file that correspond
        to this filter key into a training and validation set of trajectories, instead of
        splitting the full set of trajectories

    ratio: validation ratio, in (0, 1). Defaults to 0.1, which is 10%.

Example usage:
    python split_train_val.py --dataset /path/to/demo.hdf5 --ratio 0.1
"""

from __future__ import annotations

import argparse
import h5py
import numpy as np

from robomimic.utils.file_utils import create_hdf5_filter_key


def split_train_val_from_hdf5(hdf5_path: str, val_ratio=0.1, filter_key=None):
    """
    Splits data into training set and validation set from HDF5 file.

    Args:
        hdf5_path: path to the hdf5 file to load the transitions from
        val_ratio: ratio of validation demonstrations to all demonstrations
        filter_key: if provided, split the subset of demonstration keys stored
            under mask/@filter_key instead of the full set of demonstrations
    """
    # retrieve number of demos
    f = h5py.File(hdf5_path, "r")
    if filter_key is not None:
        print(f"Using filter key: {filter_key}")
        demos = sorted(elem.decode("utf-8") for elem in np.array(f[f"mask/{filter_key}"]))
    else:
        demos = sorted(list(f["data"].keys()))
    num_demos = len(demos)
    f.close()

    # get random split
    num_val = int(val_ratio * num_demos)
    mask = np.zeros(num_demos)
    mask[:num_val] = 1.0
    np.random.shuffle(mask)
    mask = mask.astype(int)
    train_inds = (1 - mask).nonzero()[0]
    valid_inds = mask.nonzero()[0]
    train_keys = [demos[i] for i in train_inds]
    valid_keys = [demos[i] for i in valid_inds]
    print(f"{num_val} validation demonstrations out of {num_demos} total demonstrations.")

    # pass mask to generate split
    name_1 = "train"
    name_2 = "valid"
    if filter_key is not None:
        name_1 = f"{filter_key}_{name_1}"
        name_2 = f"{filter_key}_{name_2}"

    train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1)
    valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2)

    print(f"Total number of train samples: {np.sum(train_lengths)}")
    print(f"Average number of train samples: {np.mean(train_lengths)}")
    print(f"Total number of valid samples: {np.sum(valid_lengths)}")
    print(f"Average number of valid samples: {np.mean(valid_lengths)}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset", type=str, help="path to hdf5 dataset")
    parser.add_argument(
        "--filter_key",
        type=str,
        default=None,
        help=(
            "If provided, split the subset of trajectories in the file that correspond to this filter key"
            " into a training and validation set of trajectories, instead of splitting the full set of"
            " trajectories."
        ),
    )
    parser.add_argument("--ratio", type=float, default=0.1, help="validation ratio, in (0, 1)")
    args = parser.parse_args()

    # seed to make sure results are consistent
    np.random.seed(0)

    split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key)
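
# A minimal sketch of consuming the generated split: the new keys land under
# "mask/<name>" in the same file ("demo.hdf5" is a hypothetical path), which is
# where robomimic looks when config.train.hdf5_filter_key is set to e.g. "train".
import h5py

with h5py.File("demo.hdf5", "r") as f:
    train_keys = [elem.decode("utf-8") for elem in f["mask/train"]]
    valid_keys = [elem.decode("utf-8") for elem in f["mask/valid"]]
print(f"train: {len(train_keys)} demos, valid: {len(valid_keys)} demos")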
4,685
Python
36.190476
106
0.690288
NVIDIA-Omniverse/orbit/source/standalone/workflows/sb3/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from Stable-Baselines3."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from Stable-Baselines3.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument(
    "--use_last_checkpoint",
    action="store_true",
    help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import numpy as np
import os
import torch

from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import VecNormalize

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils.parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper, process_sb3_cfg


def main():
    """Play with stable-baselines agent."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg = load_cfg_from_registry(args_cli.task, "sb3_cfg_entry_point")
    # post-process agent configuration
    agent_cfg = process_sb3_cfg(agent_cfg)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for stable baselines
    env = Sb3VecEnvWrapper(env)
    # normalize environment (if needed)
    if "normalize_input" in agent_cfg:
        env = VecNormalize(
            env,
            training=True,
            norm_obs="normalize_input" in agent_cfg and agent_cfg.pop("normalize_input"),
            norm_reward="normalize_value" in agent_cfg and agent_cfg.pop("normalize_value"),
            clip_obs="clip_obs" in agent_cfg and agent_cfg.pop("clip_obs"),
            gamma=agent_cfg["gamma"],
            clip_reward=np.inf,
        )

    # directory for logging into
    log_root_path = os.path.join("logs", "sb3", args_cli.task)
    log_root_path = os.path.abspath(log_root_path)
    # check checkpoint is valid
    if args_cli.checkpoint is None:
        if args_cli.use_last_checkpoint:
            checkpoint = "model_.*.zip"
        else:
            checkpoint = "model.zip"
        checkpoint_path = get_checkpoint_path(log_root_path, ".*", checkpoint)
    else:
        checkpoint_path = args_cli.checkpoint

    # create agent from stable baselines
    print(f"Loading checkpoint from: {checkpoint_path}")
    agent = PPO.load(checkpoint_path, env, print_system_info=True)

    # reset environment
    obs = env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions, _ = agent.predict(obs, deterministic=True)
            # env stepping
            obs, _, _, _ = env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,014
Python
33.025423
115
0.680867
NVIDIA-Omniverse/orbit/source/standalone/workflows/sb3/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with Stable Baselines3.

Since Stable-Baselines3 does not support buffers living on GPU directly,
we recommend using a smaller number of environments. Otherwise,
there will be significant overhead in GPU->CPU transfer.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with Stable-Baselines3.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import numpy as np
import os
from datetime import datetime

from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.common.logger import configure
from stable_baselines3.common.vec_env import VecNormalize

from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper, process_sb3_cfg


def main():
    """Train with stable-baselines agent."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg = load_cfg_from_registry(args_cli.task, "sb3_cfg_entry_point")

    # override configuration with command line arguments
    if args_cli.seed is not None:
        agent_cfg["seed"] = args_cli.seed

    # directory for logging into
    log_dir = os.path.join("logs", "sb3", args_cli.task, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)

    # post-process agent configuration
    agent_cfg = process_sb3_cfg(agent_cfg)
    # read configurations about the agent-training
    policy_arch = agent_cfg.pop("policy")
    n_timesteps = agent_cfg.pop("n_timesteps")

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for stable baselines
    env = Sb3VecEnvWrapper(env)
    # set the seed
    env.seed(seed=agent_cfg["seed"])

    if "normalize_input" in agent_cfg:
        env = VecNormalize(
            env,
            training=True,
            norm_obs="normalize_input" in agent_cfg and agent_cfg.pop("normalize_input"),
            norm_reward="normalize_value" in agent_cfg and agent_cfg.pop("normalize_value"),
            clip_obs="clip_obs" in agent_cfg and agent_cfg.pop("clip_obs"),
            gamma=agent_cfg["gamma"],
            clip_reward=np.inf,
        )

    # create agent from stable baselines
    agent = PPO(policy_arch, env, verbose=1, **agent_cfg)
    # configure the logger
    new_logger = configure(log_dir, ["stdout", "tensorboard"])
    agent.set_logger(new_logger)

    # callbacks for agent
    checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=log_dir, name_prefix="model", verbose=2)
    # train the agent
    agent.learn(total_timesteps=n_timesteps, callback=checkpoint_callback)
    # save the final model
    agent.save(os.path.join(log_dir, "model"))

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,362
Python
37.307143
117
0.696382
NVIDIA-Omniverse/orbit/source/standalone/workflows/rl_games/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RL-Games."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RL-Games.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument(
    "--use_last_checkpoint",
    action="store_true",
    help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import math
import os
import torch

from rl_games.common import env_configurations, vecenv
from rl_games.common.player import BasePlayer
from rl_games.torch_runner import Runner

from omni.isaac.orbit.utils.assets import retrieve_file_path

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper


def main():
    """Play with RL-Games agent."""
    # parse env configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")

    # wrap around environment for rl-games
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    # find checkpoint
    if args_cli.checkpoint is None:
        # specify directory for logging runs
        run_dir = agent_cfg["params"]["config"].get("full_experiment_name", ".*")
        # specify name of checkpoint
        if args_cli.use_last_checkpoint:
            checkpoint_file = ".*"
        else:
            # this loads the best checkpoint
            checkpoint_file = f"{agent_cfg['params']['config']['name']}.pth"
        # get path to previous checkpoint
        resume_path = get_checkpoint_path(log_root_path, run_dir, checkpoint_file, other_dirs=["nn"])
    else:
        resume_path = retrieve_file_path(args_cli.checkpoint)

    # load previously trained model
    agent_cfg["params"]["load_checkpoint"] = True
    agent_cfg["params"]["load_path"] = resume_path
    print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    runner = Runner()
    runner.load(agent_cfg)
    # obtain the agent from the runner
    agent: BasePlayer = runner.create_player()
    agent.restore(resume_path)
    agent.reset()

    # reset environment
    obs = env.reset()
    # required: enables the flag for batched observations
    _ = agent.get_batch_size(obs, 1)
    # simulate environment
    # note: We simplified the logic in rl-games player.py (:func:`BasePlayer.run()`) function in an
    #   attempt to have complete control over environment stepping. However, this removes other
    #   operations such as masking that is used for multi-agent learning by RL-Games.
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # convert obs to agent format
            obs = agent.obs_to_torch(obs)
            # agent stepping
            actions = agent.get_action(obs, is_deterministic=True)
            # env stepping
            obs, _, dones, _ = env.step(actions)

            # perform operations for terminated episodes
            if len(dones) > 0:
                # reset rnn state for terminated episodes
                if agent.is_rnn and agent.states is not None:
                    for s in agent.states:
                        s[:, dones, :] = 0.0

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,785
Python
37.573333
117
0.676059
NVIDIA-Omniverse/orbit/source/standalone/workflows/rl_games/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RL-Games."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import math
import os
from datetime import datetime

from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import IsaacAlgoObserver
from rl_games.torch_runner import Runner

from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml

import omni.isaac.orbit_tasks  # noqa: F401
from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper


def main():
    """Train with RL-Games agent."""
    # parse seed from command line
    args_cli_seed = args_cli.seed

    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")

    # override from command line
    if args_cli_seed is not None:
        agent_cfg["params"]["seed"] = args_cli_seed

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs
    log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    # set directory into agent config
    # logging directory path: <train_dir>/<full_experiment_name>
    agent_cfg["params"]["config"]["train_dir"] = log_root_path
    agent_cfg["params"]["config"]["full_experiment_name"] = log_dir

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_root_path, log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_root_path, log_dir, "params", "agent.pkl"), agent_cfg)

    # read configurations about the agent-training
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    runner = Runner(IsaacAlgoObserver())
    runner.load(agent_cfg)

    # set seed of the env
    env.seed(agent_cfg["params"]["seed"])
    # reset the agent and env
    runner.reset()
    # train the agent
    runner.run({"train": True, "play": False, "sigma": None})

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,558
Python
39.576642
117
0.692155
NVIDIA-Omniverse/orbit/docker/docker-compose.yaml
# Here we set the parts that would
# be re-used between services to an
# extension field
# https://docs.docker.com/compose/compose-file/compose-file-v3/#extension-fields
x-default-orbit-volumes: &default-orbit-volumes
  # These volumes follow from this page
  # https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_faq.html#save-isaac-sim-configs-on-local-disk
  - type: volume
    source: isaac-cache-kit
    target: ${DOCKER_ISAACSIM_ROOT_PATH}/kit/cache
  - type: volume
    source: isaac-cache-ov
    target: ${DOCKER_USER_HOME}/.cache/ov
  - type: volume
    source: isaac-cache-pip
    target: ${DOCKER_USER_HOME}/.cache/pip
  - type: volume
    source: isaac-cache-gl
    target: ${DOCKER_USER_HOME}/.cache/nvidia/GLCache
  - type: volume
    source: isaac-cache-compute
    target: ${DOCKER_USER_HOME}/.nv/ComputeCache
  - type: volume
    source: isaac-logs
    target: ${DOCKER_USER_HOME}/.nvidia-omniverse/logs
  - type: volume
    source: isaac-carb-logs
    target: ${DOCKER_ISAACSIM_ROOT_PATH}/kit/logs/Kit/Isaac-Sim
  - type: volume
    source: isaac-data
    target: ${DOCKER_USER_HOME}/.local/share/ov/data
  - type: volume
    source: isaac-docs
    target: ${DOCKER_USER_HOME}/Documents
  # These volumes allow X11 Forwarding
  # We currently comment these out because they can
  # cause bugs and warnings for people uninterested in
  # X11 Forwarding from within the docker. We keep them
  # as comments as a convenience for those seeking X11
  # forwarding until a scripted solution is developed
  # - type: bind
  #   source: /tmp/.X11-unix
  #   target: /tmp/.X11-unix
  # - type: bind
  #   source: ${HOME}/.Xauthority
  #   target: ${DOCKER_USER_HOME}/.Xauthority
  # This overlay allows changes on the local files to
  # be reflected within the container immediately
  - type: bind
    source: ../source
    target: /workspace/orbit/source
  - type: bind
    source: ../docs
    target: /workspace/orbit/docs
  # The effect of these volumes is twofold:
  # 1. Prevent root-owned files from flooding the _build and logs dir
  #    on the host machine
  # 2. Preserve the artifacts in persistent volumes for later copying
  #    to the host machine
  - type: volume
    source: orbit-docs
    target: /workspace/orbit/docs/_build
  - type: volume
    source: orbit-logs
    target: /workspace/orbit/logs
  - type: volume
    source: orbit-data
    target: /workspace/orbit/data_storage

x-default-orbit-deploy: &default-orbit-deploy
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: all
          capabilities: [ gpu ]

services:
  # This service is the base Orbit image
  orbit-base:
    profiles: ["base"]
    env_file: .env.base
    build:
      context: ../
      dockerfile: docker/Dockerfile.base
      args:
        - ISAACSIM_VERSION=${ISAACSIM_VERSION}
        - ISAACSIM_ROOT_PATH=${DOCKER_ISAACSIM_ROOT_PATH}
        - ORBIT_PATH=${DOCKER_ORBIT_PATH}
        - DOCKER_USER_HOME=${DOCKER_USER_HOME}
    image: orbit-base
    container_name: orbit-base
    environment:
      # We can't just define this in the .env file because shell envars take precedence
      # https://docs.docker.com/compose/environment-variables/envvars-precedence/
      - ISAACSIM_PATH=${DOCKER_ORBIT_PATH}/_isaac_sim
      - ORBIT_PATH=${DOCKER_ORBIT_PATH}
      # This should also be enabled for X11 forwarding
      # - DISPLAY=${DISPLAY}
    volumes: *default-orbit-volumes
    network_mode: host
    deploy: *default-orbit-deploy
    # This is the entrypoint for the container
    entrypoint: bash
    stdin_open: true
    tty: true

  # This service adds a ROS2 Humble
  # installation on top of the base image
  orbit-ros2:
    profiles: ["ros2"]
    env_file:
      - .env.base
      - .env.ros2
    build:
      context: ../
      dockerfile: docker/Dockerfile.ros2
      args:
        # ROS2_APT_PACKAGE will default to NONE. This is to
        # avoid a warning message when building only the base profile
        # with the .env.base file
        - ROS2_APT_PACKAGE=${ROS2_APT_PACKAGE:-NONE}
        - DOCKER_USER_HOME=${DOCKER_USER_HOME}
    image: orbit-ros2
    container_name: orbit-ros2
    environment:
      - ISAACSIM_PATH=${DOCKER_ORBIT_PATH}/_isaac_sim
      - ORBIT_PATH=${DOCKER_ORBIT_PATH}
    volumes: *default-orbit-volumes
    network_mode: host
    deploy: *default-orbit-deploy
    # This is the entrypoint for the container
    entrypoint: bash
    stdin_open: true
    tty: true

volumes:
  # isaac-sim
  isaac-cache-kit:
  isaac-cache-ov:
  isaac-cache-pip:
  isaac-cache-gl:
  isaac-cache-compute:
  isaac-logs:
  isaac-carb-logs:
  isaac-data:
  isaac-docs:
  # orbit
  orbit-docs:
  orbit-logs:
  orbit-data:
4,881
YAML
30.701299
119
0.646589
NVIDIA-Omniverse/orbit/docs/README.md
# Building Documentation

We use [Sphinx](https://www.sphinx-doc.org/en/master/) with the [Book Theme](https://sphinx-book-theme.readthedocs.io/en/stable/) for maintaining the documentation.

> **Note:** To build the documentation, we recommend creating a virtual environment to avoid any conflicts with system installed dependencies.

Execute the following instructions to build the documentation (assumed from the top of the repository):

1. Install the dependencies for [Sphinx](https://www.sphinx-doc.org/en/master/):

   ```bash
   # enter the location where this readme exists
   cd docs
   # install dependencies
   pip install -r requirements.txt
   ```

2. Generate the documentation file via:

   ```bash
   # make the html version
   make html
   ```

3. The documentation is now available at `docs/_build/html/index.html`:

   ```bash
   # open on default browser
   xdg-open _build/html/index.html
   ```
932
Markdown
29.096773
164
0.714592
NVIDIA-Omniverse/orbit/docs/index.rst
Overview
========

**Orbit** is a unified and modular framework for robot learning that aims to simplify common workflows
in robotics research (such as RL, learning from demonstrations, and motion planning). It is built upon
`NVIDIA Isaac Sim`_ to leverage the latest simulation capabilities for photo-realistic scenes, and fast
and efficient simulation.

The core objectives of the framework are:

- **Modularity**: Easily customize and add new environments, robots, and sensors.
- **Agility**: Adapt to the changing needs of the community.
- **Openness**: Remain open-sourced to allow the community to contribute and extend the framework.
- **Battery-included**: Include a number of environments, sensors, and tasks that are ready to use.

For more information about the framework, please refer to the `paper <https://arxiv.org/abs/2301.04195>`_
:cite:`mittal2023orbit`. For clarifications on NVIDIA Isaac ecosystem, please check out the
:doc:`/source/setup/faq` section.

.. figure:: source/_static/tasks.jpg
   :width: 100%
   :alt: Example tasks created using orbit


Citing
======

If you use Orbit in your research, please use the following BibTeX entry:

.. code:: bibtex

   @article{mittal2023orbit,
      author={Mittal, Mayank and Yu, Calvin and Yu, Qinxi and Liu, Jingzhou and Rudin, Nikita and Hoeller, David and Yuan, Jia Lin and Singh, Ritvik and Guo, Yunrong and Mazhar, Hammad and Mandlekar, Ajay and Babich, Buck and State, Gavriel and Hutter, Marco and Garg, Animesh},
      journal={IEEE Robotics and Automation Letters},
      title={Orbit: A Unified Simulation Framework for Interactive Robot Learning Environments},
      year={2023},
      volume={8},
      number={6},
      pages={3740-3747},
      doi={10.1109/LRA.2023.3270034}
   }

License
=======

NVIDIA Isaac Sim is provided under the NVIDIA End User License Agreement. However, the Orbit
framework is open-sourced under the BSD-3-Clause license. Please refer to :ref:`license` for more details.

Table of Contents
=================

.. toctree::
   :maxdepth: 2
   :caption: Getting Started

   source/setup/installation
   source/setup/developer
   source/setup/sample
   source/setup/template
   source/setup/faq

.. toctree::
   :maxdepth: 2
   :caption: Features

   source/features/environments
   source/features/actuators

.. source/features/motion_generators

.. toctree::
   :maxdepth: 1
   :caption: Resources
   :titlesonly:

   source/tutorials/index
   source/how-to/index
   source/deployment/index

.. toctree::
   :maxdepth: 1
   :caption: Source API

   source/api/index

.. toctree::
   :maxdepth: 1
   :caption: References

   source/refs/migration
   source/refs/contributing
   source/refs/troubleshooting
   source/refs/issues
   source/refs/changelog
   source/refs/license
   source/refs/bibliography

.. toctree::
   :hidden:
   :caption: Project Links

   GitHub <https://github.com/NVIDIA-Omniverse/orbit>

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

.. _NVIDIA Isaac Sim: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
3,113
reStructuredText
26.803571
278
0.716672
NVIDIA-Omniverse/orbit/docs/conf.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit"))
sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit/omni/isaac/orbit"))
sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit_tasks"))
sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks"))

# -- Project information -----------------------------------------------------

project = "orbit"
copyright = "2022-2024, The ORBIT Project Developers."
author = "The ORBIT Project Developers."

version = "0.2.0"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "autodocsumm",
    "myst_parser",
    "sphinx.ext.napoleon",
    "sphinxemoji.sphinxemoji",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.githubpages",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinxcontrib.bibtex",
    "sphinx_copybutton",
    "sphinx_design",
]

# mathjax hacks
mathjax3_config = {
    "tex": {
        "inlineMath": [["\\(", "\\)"]],
        "displayMath": [["\\[", "\\]"]],
    },
}

# panels hacks
panels_add_bootstrap_css = False
panels_add_fontawesome_css = True

# supported file extensions for source files
source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}

# make sure we don't have any unknown references
# TODO: Enable this by default once we have fixed all the warnings
# nitpicky = True

# put type hints inside the signature instead of the description (easier to maintain)
autodoc_typehints = "signature"
# autodoc_typehints_format = "fully-qualified"
# document class *and* __init__ methods
autoclass_content = "class"
# separate class docstring from __init__ docstring
autodoc_class_signature = "separated"
# sort members by source order
autodoc_member_order = "bysource"
# inherit docstrings from base classes
autodoc_inherit_docstrings = True
# BibTeX configuration
bibtex_bibfiles = ["source/_static/refs.bib"]
# generate autosummary even if no references
autosummary_generate = True
autosummary_generate_overwrite = False

# default autodoc settings
autodoc_default_options = {
    "autosummary": True,
}

# generate links to the documentation of objects in external projects
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "torch": ("https://pytorch.org/docs/stable/", None),
    "isaac": ("https://docs.omniverse.nvidia.com/py/isaacsim", None),
    "gymnasium": ("https://gymnasium.farama.org/", None),
    "warp": ("https://nvidia.github.io/warp/", None),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = []

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md", "licenses/*"]

# Mock out modules that are not available on RTD
autodoc_mock_imports = [
    "torch",
    "numpy",
    "matplotlib",
    "scipy",
    "carb",
    "warp",
    "pxr",
    "omni.kit",
    "omni.usd",
    "omni.client",
    "omni.physx",
    "omni.physics",
    "pxr.PhysxSchema",
    "pxr.PhysicsSchemaTools",
    "omni.replicator",
    "omni.isaac.core",
    "omni.isaac.kit",
    "omni.isaac.cloner",
    "omni.isaac.urdf",
    "omni.isaac.version",
    "omni.isaac.motion_generation",
    "omni.isaac.ui",
    "omni.syntheticdata",
    "omni.timeline",
    "omni.ui",
    "gym",
    "skrl",
    "stable_baselines3",
    "rsl_rl",
    "rl_games",
    "ray",
    "h5py",
    "hid",
    "prettytable",
    "tqdm",
    "tensordict",
    "trimesh",
    "toml",
]

# List of zero or more Sphinx-specific warning categories to be squelched (i.e.,
# suppressed, ignored).
suppress_warnings = [
    # FIXME: *THIS IS TERRIBLE.* Generally speaking, we do want Sphinx to inform
    # us about cross-referencing failures. Remove this hack entirely after Sphinx
    # resolves this open issue:
    #     https://github.com/sphinx-doc/sphinx/issues/4961
    # Squelch mostly ignorable warnings resembling:
    #     WARNING: more than one target found for cross-reference 'TypeHint':
    #     beartype.door._doorcls.TypeHint, beartype.door.TypeHint
    #
    # Sphinx currently emits *MANY* of these warnings against our
    # documentation. All of these warnings appear to be ignorable. Although we
    # could explicitly squelch *SOME* of these warnings by canonicalizing
    # relative to absolute references in docstrings, Sphinx emits still others
    # of these warnings when parsing PEP-compliant type hints via static
    # analysis. Since those hints are actual hints that *CANNOT* by definition
    # by canonicalized, our only recourse is to squelch warnings altogether.
    "ref.python",
]

# -- Internationalization ----------------------------------------------------

# specifying the natural language populates some key tags
language = "en"

# -- Options for HTML output -------------------------------------------------

import sphinx_book_theme

html_title = "orbit documentation"
html_theme_path = [sphinx_book_theme.get_html_theme_path()]
html_theme = "sphinx_book_theme"
html_favicon = "source/_static/favicon.ico"
html_show_copyright = True
html_show_sphinx = False
html_last_updated_fmt = ""  # to reveal the build date in the pages meta

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["source/_static/css"]
html_css_files = ["custom.css"]

html_theme_options = {
    "collapse_navigation": True,
    "repository_url": "https://github.com/NVIDIA-Omniverse/Orbit",
    "announcement": "We have now released v0.2.0! Please use the latest version for the best experience.",
    "use_repository_button": True,
    "use_issues_button": True,
    "use_edit_page_button": True,
    "show_toc_level": 1,
    "use_sidenotes": True,
    "logo": {
        "text": "orbit documentation",
        "image_light": "source/_static/NVIDIA-logo-white.png",
        "image_dark": "source/_static/NVIDIA-logo-black.png",
    },
    "icon_links": [
        {
            "name": "GitHub",
            "url": "https://github.com/NVIDIA-Omniverse/Orbit",
            "icon": "fa-brands fa-square-github",
            "type": "fontawesome",
        },
        {
            "name": "Isaac Sim",
            "url": "https://developer.nvidia.com/isaac-sim",
            "icon": "https://img.shields.io/badge/IsaacSim-2023.1.1-silver.svg",
            "type": "url",
        },
        {
            "name": "Stars",
            "url": "https://img.shields.io/github/stars/NVIDIA-Omniverse/Orbit?color=fedcba",
            "icon": "https://img.shields.io/github/stars/NVIDIA-Omniverse/Orbit?color=fedcba",
            "type": "url",
        },
    ],
    "icon_links_label": "Quick Links",
}

html_sidebars = {"**": ["navbar-logo.html", "icon-links.html", "search-field.html", "sbt-sidebar-nav.html"]}

# -- Advanced configuration -------------------------------------------------


def skip_member(app, what, name, obj, skip, options):
    # List the names of the functions you want to skip here
    exclusions = ["from_dict", "to_dict", "replace", "copy", "__post_init__"]
    if name in exclusions:
        return True
    return None


def setup(app):
    app.connect("autodoc-skip-member", skip_member)
8,492
Python
32.175781
108
0.644842
NVIDIA-Omniverse/orbit/docs/source/how-to/save_camera_output.rst
.. _how-to-save-images-and-3d-reprojection: Saving rendered images and 3D re-projection =========================================== .. currentmodule:: omni.isaac.orbit This guide is accompanied by the ``run_usd_camera.py`` script in the ``orbit/source/standalone/tutorials/04_sensors`` directory. .. dropdown:: Code for run_usd_camera.py :icon: code .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :emphasize-lines: 171-179, 229-247, 251-264 :linenos: Saving using Replicator Basic Writer ------------------------------------ To save camera outputs, we use the basic writer class from Omniverse Replicator. This class allows us to save the images in numpy format. For more information on the basic writer, please check the `documentation <https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/writer_examples.html>`_. .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :start-at: rep_writer = rep.BasicWriter( :end-before: # Camera positions, targets, orientations While stepping the simulator, the images can be saved to the defined folder. Since the BasicWriter only supports saving data using NumPy format, we first need to convert the PyTorch tensors to NumPy arrays before packing them in a dictionary. .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :start-at: # Save images from camera at camera_index :end-at: single_cam_info = camera.data.info[camera_index] After this step, we can save the images using the BasicWriter. .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :start-at: # Pack data back into replicator format to save them using its writer :end-at: rep_writer.write(rep_output) Projection into 3D Space ------------------------ We include utilities to project the depth image into 3D space. The re-projection operations are done using PyTorch operations, which allows for faster computation. .. code-block:: python from omni.isaac.orbit.utils.math import transform_points, unproject_depth # Pointcloud in world frame points_3d_cam = unproject_depth( camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices ) points_3d_world = transform_points(points_3d_cam, camera.data.pos_w, camera.data.quat_w_ros) Alternatively, we can use the :meth:`omni.isaac.orbit.sensors.camera.utils.create_pointcloud_from_depth` function to create a point cloud from the depth image and transform it to the world frame. .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :start-at: # Derive pointcloud from camera at camera_index :end-before: # In the first few steps, things are still being instanced and Camera.data The resulting point cloud can be visualized using the :mod:`omni.isaac.debug_draw` extension from Isaac Sim. This makes it easy to visualize the point cloud in the 3D space. .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py :language: python :start-at: # In the first few steps, things are still being instanced and Camera.data :end-at: pc_markers.visualize(translations=pointcloud) Executing the script -------------------- To run the accompanying script, execute the following command: .. 
code-block:: bash # Usage with saving and drawing ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --draw # Usage with saving only in headless mode ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --headless --offscreen_render The simulation should start, and you can observe different objects falling down. An output folder will be created in the ``orbit/source/standalone/tutorials/04_sensors`` directory, where the images will be saved. Additionally, you should see the point cloud in the 3D space drawn on the viewport. To stop the simulation, close the window, press the ``STOP`` button in the UI, or use ``Ctrl+C`` in the terminal.
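For quick reference, the point-cloud utility mentioned above can also be called directly as shown below. This is a minimal sketch assuming a ``camera`` sensor and a ``camera_index`` as in the tutorial script; the keyword arguments follow our reading of the function's parameters, so please verify them against the API reference.

.. code-block:: python

    from omni.isaac.orbit.sensors.camera.utils import create_pointcloud_from_depth

    # back-project the depth image and express the resulting points in the world frame
    pointcloud = create_pointcloud_from_depth(
        intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
        depth=camera.data.output[camera_index]["distance_to_image_plane"],
        position=camera.data.pos_w[camera_index],
        orientation=camera.data.quat_w_ros[camera_index],
        device=sim.device,
    )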
4,175
reStructuredText
39.543689
116
0.728383
NVIDIA-Omniverse/orbit/docs/source/how-to/wrap_rl_env.rst
.. _how-to-env-wrappers: Wrapping environments ===================== .. currentmodule:: omni.isaac.orbit Environment wrappers are a way to modify the behavior of an environment without modifying the environment itself. This can be used to apply functions to modify observations or rewards, record videos, enforce time limits, etc. A detailed description of the API is available in the :class:`gymnasium.Wrapper` class. At present, all RL environments inheriting from the :class:`~envs.RLTaskEnv` class are compatible with :class:`gymnasium.Wrapper`, since the base class implements the :class:`gymnasium.Env` interface. In order to wrap an environment, you need to first initialize the base environment. After that, you can wrap it with as many wrappers as you want by calling ``env = wrapper(env, *args, **kwargs)`` repeatedly. For example, here is how you would wrap an environment to enforce that reset is called before step or render: .. code-block:: python """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher # launch omniverse app in headless mode app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import load_cfg_from_registry # create base environment cfg = load_cfg_from_registry("Isaac-Reach-Franka-v0", "env_cfg_entry_point") env = gym.make("Isaac-Reach-Franka-v0", cfg=cfg) # wrap environment to enforce that reset is called before step env = gym.wrappers.OrderEnforcing(env) Wrapper for recording videos ---------------------------- The :class:`gymnasium.wrappers.RecordVideo` wrapper can be used to record videos of the environment. The wrapper takes a ``video_folder`` argument, which specifies where to save the videos. The videos are saved in `mp4 <https://en.wikipedia.org/wiki/MP4_file_format>`__ format at specified intervals for a specified number of environment steps or episodes. To use the wrapper, you need to first install ``ffmpeg``. On Ubuntu, you can install it by running: .. code-block:: bash sudo apt-get install ffmpeg .. attention:: By default, when running an environment in headless mode, the Omniverse viewport is disabled. This is done to improve performance by avoiding unnecessary rendering. We observe the following performance in different rendering modes with the ``Isaac-Reach-Franka-v0`` environment using an RTX 3090 GPU: * No GUI execution without off-screen rendering enabled: ~65,000 FPS * No GUI execution with off-screen enabled: ~57,000 FPS * GUI execution with full rendering: ~13,000 FPS The viewport camera used for rendering is the default camera in the scene called ``"/OmniverseKit_Persp"``. The camera's pose and image resolution can be configured through the :class:`~envs.ViewerCfg` class. .. dropdown:: Default parameters of the ViewerCfg class: :icon: code .. literalinclude:: ../../../source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env_cfg.py :language: python :pyobject: ViewerCfg After adjusting the parameters, you can record videos by wrapping the environment with the :class:`gymnasium.wrappers.RecordVideo` wrapper and enabling the off-screen rendering flag. Additionally, you need to specify the render mode of the environment as ``"rgb_array"``. As an example, the following code records a video of the ``Isaac-Reach-Franka-v0`` environment for 200 steps, and saves it in the ``videos`` folder at a step interval of 1500 steps. .. 
code:: python """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher # launch omniverse app in headless mode with off-screen rendering app_launcher = AppLauncher(headless=True, offscreen_render=True) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import load_cfg_from_registry # create base environment configuration task_name = "Isaac-Reach-Franka-v0" env_cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") # adjust camera resolution and pose env_cfg.viewer.resolution = (640, 480) env_cfg.viewer.eye = (1.0, 1.0, 1.0) env_cfg.viewer.lookat = (0.0, 0.0, 0.0) # create isaac-env instance # set render mode to rgb_array to obtain images on render calls env = gym.make(task_name, cfg=env_cfg, render_mode="rgb_array") # wrap for video recording video_kwargs = { "video_folder": "videos", "step_trigger": lambda step: step % 1500 == 0, "video_length": 200, } env = gym.wrappers.RecordVideo(env, **video_kwargs) Wrapper for learning frameworks ------------------------------- Every learning framework has its own API for interacting with environments. For example, the `Stable-Baselines3`_ library uses the `gym.Env <https://gymnasium.farama.org/api/env/>`_ interface to interact with environments. However, libraries like `RL-Games`_ or `RSL-RL`_ use their own API for interfacing with learning environments. Since there is no one-size-fits-all solution, we do not base the :class:`~envs.RLTaskEnv` class on any particular learning framework's environment definition. Instead, we implement wrappers to make it compatible with the learning framework's environment definition. As an example of how to use the RL task environment with Stable-Baselines3: .. code:: python from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper # create isaac-env instance env = gym.make(task_name, cfg=env_cfg) # wrap around environment for stable baselines env = Sb3VecEnvWrapper(env) .. caution:: Wrapping the environment with the respective learning framework's wrapper should happen in the end, i.e. after all other wrappers have been applied. This is because the learning framework's wrapper modifies the interpretation of the environment's APIs which may no longer be compatible with :class:`gymnasium.Env`. Adding new wrappers ------------------- All new wrappers should be added to the :mod:`omni.isaac.orbit_tasks.utils.wrappers` module. They should check that the underlying environment is an instance of :class:`omni.isaac.orbit.envs.RLTaskEnv` before applying the wrapper. This can be done by using the :func:`unwrapped` property. We include a set of wrappers in this module that can be used as a reference to implement your own wrappers. If you implement a new wrapper, please consider contributing it to the framework by opening a pull request. .. _Stable-Baselines3: https://stable-baselines3.readthedocs.io/en/master/ .. _RL-Games: https://github.com/Denys88/rl_games .. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
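To make the guidance in the last section concrete, the following is a minimal sketch of a new wrapper. The ``ClipActionsWrapper`` name and its clipping behavior are hypothetical; the point is where the :class:`omni.isaac.orbit.envs.RLTaskEnv` instance check is placed.

.. code-block:: python

    import gymnasium as gym

    from omni.isaac.orbit.envs import RLTaskEnv


    class ClipActionsWrapper(gym.Wrapper):
        """Hypothetical wrapper that clips actions to the unit interval."""

        def __init__(self, env: gym.Env):
            # verify the wrapper is applied on top of an Orbit RL task environment
            if not isinstance(env.unwrapped, RLTaskEnv):
                raise ValueError(f"Expected an RLTaskEnv, but got: {type(env.unwrapped)}")
            super().__init__(env)

        def step(self, action):
            # actions in Orbit are torch tensors, so we can clamp them directly
            return self.env.step(action.clamp(-1.0, 1.0))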
6,619
reStructuredText
38.879518
117
0.735005
NVIDIA-Omniverse/orbit/docs/source/how-to/draw_markers.rst
Creating Visualization Markers ============================== .. currentmodule:: omni.isaac.orbit Visualization markers are useful to debug the state of the environment. They can be used to visualize the frames, commands, and other information in the simulation. While Isaac Sim provides its own :mod:`omni.isaac.debug_draw` extension, it is limited to rendering only points, lines and splines. For cases where you need to render more complex shapes, you can use the :class:`markers.VisualizationMarkers` class. This guide is accompanied by a sample script ``markers.py`` in the ``orbit/source/standalone/demos`` directory. .. dropdown:: Code for markers.py :icon: code .. literalinclude:: ../../../source/standalone/demos/markers.py :language: python :emphasize-lines: 49-97, 112-113, 142-148 :linenos: Configuring the markers ----------------------- The :class:`~markers.VisualizationMarkersCfg` class provides a simple interface to configure different types of markers. It takes in the following parameters: - :attr:`~markers.VisualizationMarkersCfg.prim_path`: The corresponding prim path for the marker class. - :attr:`~markers.VisualizationMarkersCfg.markers`: A dictionary specifying the different marker prototypes handled by the class. The key is the name of the marker prototype and the value is its spawn configuration. .. note:: In case the marker prototype specifies a configuration with physics properties, these are removed. This is because the markers are not meant to be simulated. Here we show all the different types of markers that can be configured. These range from simple shapes like cones and spheres to more complex geometries like a frame or arrows. The marker prototypes can also be configured from USD files. .. literalinclude:: ../../../source/standalone/demos/markers.py :language: python :lines: 49-97 :dedent: Drawing the markers ------------------- To draw the markers, we call the :class:`~markers.VisualizationMarkers.visualize` method. This method takes in as arguments the pose of the markers and the corresponding marker prototypes to draw. .. literalinclude:: ../../../source/standalone/demos/markers.py :language: python :lines: 142-148 :dedent: Executing the Script -------------------- To run the accompanying script, execute the following command: .. code-block:: bash ./orbit.sh -p source/standalone/demos/markers.py The simulation should start, and you can observe the different types of markers arranged in a grid pattern. The markers will rotate around their respective axes. Additionally, every few rotations, they will roll forward on the grid. To stop the simulation, close the window, or use ``Ctrl+C`` in the terminal.
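Putting the configuration and drawing steps together, a minimal sketch could look like the following. The prim path, shapes, and poses are illustrative; the spawner classes are the same ones used elsewhere in the framework, and the ``visualize`` keyword names should be checked against the API reference.

.. code-block:: python

    import torch

    import omni.isaac.orbit.sim as sim_utils
    from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg

    # configure two marker prototypes: a red sphere and a green cone
    marker_cfg = VisualizationMarkersCfg(
        prim_path="/Visuals/myMarkers",
        markers={
            "sphere": sim_utils.SphereCfg(
                radius=0.1,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
            ),
            "cone": sim_utils.ConeCfg(
                radius=0.1,
                height=0.2,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
            ),
        },
    )
    my_markers = VisualizationMarkers(marker_cfg)

    # draw one instance of each prototype at different locations
    translations = torch.tensor([[0.0, 0.0, 0.5], [0.5, 0.0, 0.5]])
    my_markers.visualize(translations=translations, marker_indices=[0, 1])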
2,751
reStructuredText
35.210526
111
0.739731
NVIDIA-Omniverse/orbit/docs/source/how-to/import_new_asset.rst
Importing a New Asset ===================== .. currentmodule:: omni.isaac.orbit NVIDIA Omniverse relies on the Universal Scene Description (USD) file format to import and export assets. USD is an open source file format developed by Pixar Animation Studios. It is a scene description format optimized for large-scale, complex data sets. While this format is widely used in the film and animation industry, it is less common in the robotics community. To this end, NVIDIA has developed various importers that allow you to import assets from other file formats into USD. These importers are available as extensions to Omniverse Kit: * **URDF Importer** - Import assets from URDF files. * **MJCF Importer** - Import assets from MJCF files. * **Asset Importer** - Import assets from various file formats, including OBJ, FBX, STL, and glTF. The recommended workflow from NVIDIA is to use the above importers to convert the asset into its USD representation. Once the asset is in USD format, you can use the Omniverse Kit to edit the asset and export it to other file formats. An important consideration when using assets for large-scale simulation is to ensure that they are in `instanceable`_ format. This allows the asset to be efficiently loaded into memory and used multiple times in a scene. Otherwise, the asset will be loaded into memory multiple times, which can cause performance issues. For more details on instanceable assets, please check the Isaac Sim `documentation`_. Using URDF Importer ------------------- Isaac Sim includes the URDF and MJCF importers by default. These importers support the option to import assets as instanceable assets. By selecting this option, the importer will create two USD files: one for all the mesh data and one for all the non-mesh data (e.g. joints, rigid bodies, etc.). The prims in the mesh data file are referenced in the non-mesh data file. This allows the mesh data (which is often bulky) to be loaded into memory only once and used multiple times in a scene. For using these importers from the GUI, please check the documentation for `MJCF importer`_ and `URDF importer`_ respectively. For using the URDF importer from Python scripts, we include a utility tool called ``convert_urdf.py``. Internally, this script creates an instance of :class:`~sim.converters.UrdfConverterCfg` which is then passed to the :class:`~sim.converters.UrdfConverter` class. The configuration class specifies the default values for the importer. The important settings are: * :attr:`~sim.converters.UrdfConverterCfg.fix_base` - Whether to fix the base of the robot. This depends on whether you have a floating-base or fixed-base robot. * :attr:`~sim.converters.UrdfConverterCfg.make_instanceable` - Whether to create instanceable assets. Usually, this should be set to ``True``. * :attr:`~sim.converters.UrdfConverterCfg.merge_fixed_joints` - Whether to merge the fixed joints. Usually, this should be set to ``True`` to reduce the asset complexity. * :attr:`~sim.converters.UrdfConverterCfg.default_drive_type` - The drive-type for the joints. We recommend this to always be ``"none"``. This allows changing the drive configuration using the actuator models. * :attr:`~sim.converters.UrdfConverterCfg.default_drive_stiffness` - The drive stiffness for the joints. We recommend this to always be ``0.0``. This allows changing the drive configuration using the actuator models. * :attr:`~sim.converters.UrdfConverterCfg.default_drive_damping` - The drive damping for the joints. 
Similar to the stiffness, we recommend this to always be ``0.0``. Example Usage ~~~~~~~~~~~~~ In this example, we use the pre-processed URDF file of the ANYmal-D robot. To inspect the pre-processed URDF, please check the `anymal.urdf`_ file. The main differences between the pre-processed URDF and the original URDF are: * We removed the ``<gazebo>`` tag from the URDF. This tag is not supported by the URDF importer. * We removed the ``<transmission>`` tag from the URDF. This tag is not supported by the URDF importer. * We removed various collision bodies from the URDF to reduce the complexity of the asset. * We changed all the joints' damping and friction parameters to ``0.0``. This ensures that we can perform effort-control on the joints without PhysX adding additional damping. * We added the ``<dont_collapse>`` tag to fixed joints. This ensures that the importer does not merge these fixed joints. The following shows the steps to clone the repository and run the converter: .. code-block:: bash # create a directory to clone mkdir ~/git && cd ~/git # clone a repository with URDF files git clone git@github.com:isaac-orbit/anymal_d_simple_description.git # go to top of the repository cd /path/to/orbit # run the converter ./orbit.sh -p source/standalone/tools/convert_urdf.py \ ~/git/anymal_d_simple_description/urdf/anymal.urdf \ source/extensions/omni.isaac.orbit_assets/data/Robots/ANYbotics/anymal_d.usd \ --merge-joints \ --make-instanceable Executing the above script will create two USD files inside the ``source/extensions/omni.isaac.orbit_assets/data/Robots/ANYbotics/`` directory: * ``anymal_d.usd`` - This is the main asset file. It contains all the non-mesh data. * ``Props/instanceable_assets.usd`` - This is the mesh data file. .. note:: Since Isaac Sim 2023.1.1, the URDF importer behavior has changed and it stores the mesh data inside the main asset file even if the ``--make-instanceable`` flag is set. This means that the ``Props/instanceable_assets.usd`` file is created but not used anymore. You can press play on the opened window to see the asset in the scene. The asset should "collapse" if everything is working correctly. If it blows up, then it might be that you have self-collisions present in the URDF. To run the script headless, you can add the ``--headless`` flag. This will not open the GUI and will exit the script after the conversion is complete. Using Mesh Importer ------------------- Omniverse Kit includes the mesh converter tool that uses the ASSIMP library to import assets from various mesh formats (e.g. OBJ, FBX, STL, glTF, etc.). The asset converter tool is available as an extension to Omniverse Kit. Please check the `asset converter`_ documentation for more details. However, unlike Isaac Sim's URDF and MJCF importers, the asset converter tool does not support creating instanceable assets. This means that the asset will be loaded into memory multiple times if it is used multiple times in a scene. Thus, we include a utility tool called ``convert_mesh.py`` that uses the asset converter tool to import the asset and then converts it into an instanceable asset. Internally, this script creates an instance of :class:`~sim.converters.MeshConverterCfg` which is then passed to the :class:`~sim.converters.MeshConverter` class. Since the mesh file does not contain any physics information, the configuration class accepts different physics properties (such as mass, collision shape, etc.) as input. Please check the documentation for :class:`~sim.converters.MeshConverterCfg` for more details. 
Example Usage ~~~~~~~~~~~~~ We use an OBJ file of a cube to demonstrate the usage of the mesh converter. The following shows the steps to clone the repository and run the converter: .. code-block:: bash # create a directory to clone mkdir ~/git && cd ~/git # clone a repository with mesh files git clone git@github.com:NVIDIA-Omniverse/IsaacGymEnvs.git # go to top of the repository cd /path/to/orbit # run the converter ./orbit.sh -p source/standalone/tools/convert_mesh.py \ ~/git/IsaacGymEnvs/assets/trifinger/objects/meshes/cube_multicolor.obj \ source/extensions/omni.isaac.orbit_assets/data/Props/CubeMultiColor/cube_multicolor.usd \ --make-instanceable \ --collision-approximation convexDecomposition \ --mass 1.0 Similar to the URDF converter, executing the above script will create two USD files inside the ``source/extensions/omni.isaac.orbit_assets/data/Props/CubeMultiColor/`` directory. Additionally, if you press play on the opened window, you should see the asset fall down under the influence of gravity. * If you do not set the ``--mass`` flag, then no rigid body properties will be added to the asset. It will be imported as a static asset. * If you also do not set the ``--collision-approximation`` flag, then the asset will not have any collider properties either and will be imported as a purely visual asset. .. _instanceable: https://openusd.org/dev/api/_usd__page__scenegraph_instancing.html .. _documentation: https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_gym_tutorials/tutorial_gym_instanceable_assets.html .. _MJCF importer: https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_mjcf.html .. _URDF importer: https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html .. _anymal.urdf: https://github.com/isaac-orbit/anymal_d_simple_description/blob/master/urdf/anymal.urdf .. _asset converter: https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-converter.html
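For reference, the conversion can also be scripted directly, since the command-line tools are thin wrappers around the converter classes. Below is a minimal sketch for the URDF converter; the input and output paths are placeholders, and the configuration field names (``asset_path``, ``usd_dir``, ``usd_file_name``) are our reading of the converter configuration, so please verify them against the API documentation.

.. code-block:: python

    from omni.isaac.orbit.sim.converters import UrdfConverter, UrdfConverterCfg

    # configure the converter with the recommended settings listed above
    urdf_converter_cfg = UrdfConverterCfg(
        asset_path="/path/to/robot.urdf",  # input URDF file (placeholder)
        usd_dir="/path/to/output/dir",     # output directory (placeholder)
        usd_file_name="robot.usd",
        fix_base=False,                    # floating-base robot
        make_instanceable=True,
        merge_fixed_joints=True,
        default_drive_type="none",         # defer drive configuration to actuator models
        default_drive_stiffness=0.0,
        default_drive_damping=0.0,
    )
    # run the conversion and report the generated USD file
    urdf_converter = UrdfConverter(urdf_converter_cfg)
    print(f"Generated USD file: {urdf_converter.usd_path}")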
9,167
reStructuredText
50.79661
126
0.765245
NVIDIA-Omniverse/orbit/docs/source/how-to/master_omniverse.rst
Mastering Omniverse for Robotics ================================ NVIDIA Omniverse offers a large suite of tools for 3D content workflows. There are three main components (relevant to robotics) in Omniverse: - **USD Composer**: This is based on a novel file format (Universal Scene Description) from the animation (originally Pixar) community that is used in Omniverse - **PhysX SDK**: This is the main physics engine behind Omniverse that leverages GPU-based parallelization for massive scenes - **RTX-enabled Renderer**: This uses ray-tracing kernels in NVIDIA RTX GPUs for real-time physically-based rendering Of these, the first two require a deeper understanding to start working with Omniverse and its constituent applications (Isaac Sim and Orbit). The main things to learn: - How to use the Composer GUI efficiently? - What are USD prims and schemas? - How do you compose a USD scene? - What is the difference between references and payloads in USD? - What is meant by scene-graph instancing? - How to apply PhysX schemas on prims? Which schemas are available? - How to write basic operations in USD for creating prims and modifying their attributes? Part 1: Using USD Composer -------------------------- While several `video tutorials <https://www.youtube.com/@NVIDIA-Studio>`__ and `documentation <https://docs.omniverse.nvidia.com/>`__ exist out there on NVIDIA Omniverse, going through all of them would take an extensive amount of time and effort. Thus, we have curated these resources to guide you through using Omniverse, specifically for robotics. Introduction to Omniverse and USD - `What is NVIDIA Omniverse? <https://youtu.be/dvdB-ndYJBM>`__ - `What is the USD File Type? \| Getting Started in NVIDIA Omniverse <https://youtu.be/GOdyx-oSs2M>`__ - `What Makes USD Unique in NVIDIA Omniverse <https://youtu.be/o2x-30-PTkw>`__ Using Omniverse USD Composer - `Introduction to Omniverse USD Composer <https://youtu.be/_30Pf3nccuE>`__ - `Navigation Basics in Omniverse USD Composer <https://youtu.be/kb4ZA3TyMak>`__ - `Lighting Basics in NVIDIA Omniverse USD Composer <https://youtu.be/c7qyI8pZvF4>`__ - `Rendering Overview in NVIDIA Omniverse USD Composer <https://youtu.be/dCvq2ZyYmu4>`__ Materials and MDL - `Five Things to Know About Materials in NVIDIA Omniverse <https://youtu.be/C0HmcQXaENc>`__ - `How to apply materials? <https://docs.omniverse.nvidia.com/materials-and-rendering/latest/materials.html%23applying-materials>`__ Omniverse Physics and PhysX SDK - `Basics - Setting Up Physics and Toolbar Overview <https://youtu.be/nsJ0S9MycJI>`__ - `Basics - Demos Overview <https://youtu.be/-y0-EVTj10s>`__ - `Rigid Bodies - Mass Editing <https://youtu.be/GHl2RwWeRuM>`__ - `Materials - Friction Restitution and Defaults <https://youtu.be/oTW81DltNiE>`__ - `Overview of Simulation Ready Assets Physics in Omniverse <https://youtu.be/lFtEMg86lJc>`__ Importing assets - `Omniverse Create - Importing FBX Files \| NVIDIA Omniverse Tutorials <https://youtu.be/dQI0OpzfVHw>`__ - `Omniverse Asset Importer <https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-importer.html>`__ - `Isaac Sim URDF importer <https://docs.omniverse.nvidia.com/isaacsim/latest/ext_omni_isaac_urdf.html>`__ Part 2: Scripting in Omniverse ------------------------------ The above links mainly introduced how to use the USD Composer and its functionalities through UI operations. However, often developers need to write scripts to perform operations. 
This is especially true when you want to automate certain tasks or create custom applications that use Omniverse as a backend. This section will introduce you to scripting in Omniverse. USD is the main file format Omniverse operates with. So naturally, the APIs (from OpenUSD) for modifying USD are at the core of Omniverse. Most of the APIs are in C++, with Python bindings provided for them. Thus, to script in Omniverse, you need to understand the USD APIs. .. note:: While Isaac Sim and Orbit try to "relieve" users from understanding the core USD concepts and APIs, understanding these basics still helps a lot once you start diving inside the codebase and modifying it for your own application. Before diving into USD scripting, it is good to get acquainted with the terminology used in USD. We recommend the following `introduction to USD basics <https://www.sidefx.com/docs/houdini/solaris/usd.html>`__ by Houdini, which is a 3D animation software. Make sure to go through the following sections: - `Quick example <https://www.sidefx.com/docs/houdini/solaris/usd.html%23quick-example>`__ - `Attributes and primvars <https://www.sidefx.com/docs/houdini/solaris/usd.html%23attrs>`__ - `Composition <https://www.sidefx.com/docs/houdini/solaris/usd.html%23compose>`__ - `Schemas <https://www.sidefx.com/docs/houdini/solaris/usd.html%23schemas>`__ - `Instances <https://www.sidefx.com/docs/houdini/solaris/usd.html%23instancing>`__ and `Scene-graph Instancing <https://openusd.org/dev/api/_usd__page__scenegraph_instancing.html>`__ As a test of understanding, make sure you can answer the following: - What are prims? What is meant by a prim path in a stage? - How are attributes related to prims? - How are schemas related to prims? - What is the difference between attributes and schemas? - What is asset instancing? Part 3: More Resources ---------------------- - `Omniverse Glossary of Terms <https://docs.omniverse.nvidia.com/isaacsim/latest/common/glossary-of-terms.html>`__ - `Omniverse Code Samples <https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref.html>`__ - `PhysX Collider Compatibility <https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/rigid-bodies.html#collidercompatibility>`__ - `PhysX Limitations <https://docs.omniverse.nvidia.com/isaacsim/latest/features/physics/physX_limitations.html>`__ - `PhysX Documentation <https://nvidia-omniverse.github.io/PhysX/physx/>`__.
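To make the scripting questions above concrete, here is a small, self-contained sketch of basic USD operations using the ``pxr`` Python bindings: creating a stage, defining prims, and modifying attributes. It only touches core OpenUSD APIs and can be run in any Python environment where USD is available.

.. code-block:: python

    from pxr import Sdf, Usd, UsdGeom

    # create a new stage in memory (use Usd.Stage.CreateNew("scene.usda") to write to disk)
    stage = Usd.Stage.CreateInMemory()

    # define prims: an Xform as a parent and a Cube underneath it
    UsdGeom.Xform.Define(stage, "/World")
    cube = UsdGeom.Cube.Define(stage, "/World/Cube")

    # modify an existing attribute through the schema API
    cube.GetSizeAttr().Set(0.5)

    # create and set a custom attribute on the underlying prim
    prim = cube.GetPrim()
    prim.CreateAttribute("myCustomValue", Sdf.ValueTypeNames.Float).Set(1.0)

    # inspect the authored scene description
    print(stage.GetRootLayer().ExportToString())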
5,971
reStructuredText
46.776
140
0.748786
NVIDIA-Omniverse/orbit/docs/source/how-to/write_articulation_cfg.rst
.. _how-to-write-articulation-config: Writing an Asset Configuration ============================== .. currentmodule:: omni.isaac.orbit This guide walks through the process of creating an :class:`~assets.ArticulationCfg`. The :class:`~assets.ArticulationCfg` is a configuration object that defines the properties of an :class:`~assets.Articulation` in Orbit. .. note:: While we only cover the creation of an :class:`~assets.ArticulationCfg` in this guide, the process is similar for creating any other asset configuration object. We will use the Cartpole example to demonstrate how to create an :class:`~assets.ArticulationCfg`. The Cartpole is a simple robot that consists of a cart with a pole attached to it. The cart is free to move along a rail, and the pole is free to rotate about the cart. .. dropdown:: Code for Cartpole configuration :icon: code .. literalinclude:: ../../../source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/cartpole.py :language: python :linenos: Defining the spawn configuration -------------------------------- As explained in the :ref:`tutorial-spawn-prims` tutorial, the spawn configuration defines the properties of the assets to be spawned. This spawning may happen procedurally, or through an existing asset file (e.g. USD or URDF). In this example, we will spawn the Cartpole from a USD file. When spawning an asset from a USD file, we define its :class:`~sim.spawners.from_files.UsdFileCfg`. This configuration object takes in the following parameters: * :class:`~sim.spawners.from_files.UsdFileCfg.usd_path`: The USD file path to spawn from * :class:`~sim.spawners.from_files.UsdFileCfg.rigid_props`: The properties of the articulation's root * :class:`~sim.spawners.from_files.UsdFileCfg.articulation_props`: The properties of all the articulation's links The last two parameters are optional. If not specified, they are kept at their default values in the USD file. .. literalinclude:: ../../../source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/cartpole.py :language: python :lines: 17-33 :dedent: To import an articulation from a URDF file instead of a USD file, you can replace the :class:`~sim.spawners.from_files.UsdFileCfg` with a :class:`~sim.spawners.from_files.UrdfFileCfg`. For more details, please check the API documentation. Defining the initial state -------------------------- Every asset requires defining its initial or *default* state in the simulation through its configuration. This configuration is stored in the asset's default state buffers that can be accessed when the asset's state needs to be reset. .. note:: The initial state of an asset is defined w.r.t. its local environment frame. This then needs to be transformed into the global simulation frame when resetting the asset's state. For more details, please check the :ref:`tutorial-interact-articulation` tutorial. For an articulation, the :class:`~assets.ArticulationCfg.InitialStateCfg` object defines the initial state of the root of the articulation and the initial state of all its joints. In this example, we will spawn the Cartpole at the origin of the XY plane at a Z height of 2.0 meters. Meanwhile, the joint positions and velocities are set to 0.0. .. literalinclude:: ../../../source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/cartpole.py :language: python :lines: 34-36 :dedent: Defining the actuator configuration ----------------------------------- Actuators are a crucial component of an articulation. 
Through this configuration, it is possible to define the type of actuator model to use. We can use the internal actuator model provided by the physics engine (i.e. the implicit actuator model), or use a custom actuator model which is governed by a user-defined system of equations (i.e. the explicit actuator model). For more details on actuators, see :ref:`feature-actuators`. The cartpole's articulation has two actuators, one corresponding to each of its joints: ``cart_to_pole`` and ``slider_to_cart``. As an example, we configure these actuators with two separate entries. However, since both entries use the same actuator model, it is also possible to combine them into a single actuator model. .. dropdown:: Actuator model configuration with separate actuator models :icon: code .. literalinclude:: ../../../source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/cartpole.py :language: python :lines: 37-47 :dedent: .. dropdown:: Actuator model configuration with a single actuator model :icon: code .. code-block:: python actuators={ "all_joints": ImplicitActuatorCfg( joint_names_expr=[".*"], effort_limit=400.0, velocity_limit=100.0, stiffness={"slider_to_cart": 0.0, "cart_to_pole": 0.0}, damping={"slider_to_cart": 10.0, "cart_to_pole": 0.0}, ), },
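For completeness, the initial-state portion of the configuration described earlier can be written as the following sketch. The values mirror the cartpole defaults discussed above (cart spawned 2 meters above the ground, joints at rest).

.. code-block:: python

    from omni.isaac.orbit.assets import ArticulationCfg

    init_state = ArticulationCfg.InitialStateCfg(
        # root pose: origin of the XY plane, 2 m above the ground
        pos=(0.0, 0.0, 2.0),
        # joint positions: cart and pole at rest
        joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0},
    )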
4,957
reStructuredText
41.376068
113
0.727053
NVIDIA-Omniverse/orbit/docs/source/how-to/record_animation.rst
Recording Animations of Simulations =================================== .. currentmodule:: omni.isaac.orbit Omniverse includes tools to record animations of physics simulations. The `Stage Recorder`_ extension listens to all the motion and USD property changes within a USD stage and records them to a USD file. This file contains the time samples of the changes, which can be played back to render the animation. The timeSampled USD file only contains the changes to the stage. It uses the same hierarchy as the original stage at the time of recording. This allows adding the animation to the original stage, or to a different stage with the same hierarchy. The timeSampled file can be directly added as a sublayer to the original stage to play back the animation. .. note:: Omniverse only supports playing either an animation or the physics simulation on a USD prim at a given time. If you want to play back the animation of a USD prim, you need to disable the physics simulation on the prim. In Orbit, we directly use the `Stage Recorder`_ extension to record the animation of the physics simulation. This is available as a feature in the :class:`~omni.isaac.orbit.envs.ui.BaseEnvWindow` class. However, to record the animation of a simulation, you need to disable `Fabric`_ to allow reading and writing all the changes (such as motion and USD properties) to the USD stage. Stage Recorder Settings ~~~~~~~~~~~~~~~~~~~~~~~ The Orbit integration of the `Stage Recorder`_ extension assumes certain default settings. If you want to change the settings, you can directly use the `Stage Recorder`_ extension in the Omniverse Create application. .. dropdown:: Settings used in base_env_window.py :icon: code .. literalinclude:: ../../../source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/ui/base_env_window.py :language: python :linenos: :pyobject: BaseEnvWindow._toggle_recording_animation_fn Example Usage ~~~~~~~~~~~~~ In all environment standalone scripts, Fabric can be disabled by passing the ``--disable_fabric`` flag to the script. Here we run the state-machine example and record the animation of the simulation. .. code-block:: bash ./orbit.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 8 --cpu --disable_fabric On running the script, the Orbit UI window opens with the button "Record Animation" in the toolbar. Clicking this button starts recording the animation of the simulation. On clicking the button again, the recording stops. The recorded animation and the original stage (with all physics disabled) are saved to the ``recordings`` folder in the current working directory. The files are stored in the ``usd`` format: - ``Stage.usd``: The original stage with all physics disabled - ``TimeSample_tk001.usd``: The timeSampled file containing the recorded animation You can open the Omniverse Isaac Sim application to play back the animation. There are many ways to launch the application (such as from the terminal or the `Omniverse Launcher`_). Here we use the terminal to open the application and play the animation. .. code-block:: bash ./orbit.sh -s # Opens Isaac Sim application through _isaac_sim/isaac-sim.sh On a new stage, add the ``Stage.usd`` as a sublayer and then add the ``TimeSample_tk001.usd`` as a sublayer. You can do this by dragging and dropping the files from the file explorer to the stage. Please check out the `tutorial on layering in Omniverse`_ for more details. You can then play the animation by pressing the play button. .. 
_Stage Recorder: https://docs.omniverse.nvidia.com/extensions/latest/ext_animation_stage-recorder.html .. _Fabric: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usd_fabric_usdrt.html .. _Omniverse Launcher: https://docs.omniverse.nvidia.com/launcher/latest/index.html .. _tutorial on layering in Omniverse: https://www.youtube.com/watch?v=LTwmNkSDh-c&ab_channel=NVIDIAOmniverse
3,914
reStructuredText
48.556961
117
0.762902
NVIDIA-Omniverse/orbit/docs/source/how-to/index.rst
How-to Guides ============= This section includes guides that help you use Orbit. These are intended for users who have already worked through the tutorials and are looking for more information on how to use Orbit. If you are new to Orbit, we recommend you start with the tutorials. .. note:: This section is a work in progress. If you have a question that is not answered here, please open an issue on our `GitHub page <https://github.com/NVIDIA-Omniverse/Orbit>`_. .. toctree:: :maxdepth: 1 import_new_asset write_articulation_cfg save_camera_output draw_markers wrap_rl_env master_omniverse record_animation
656
reStructuredText
27.565216
91
0.716463
NVIDIA-Omniverse/orbit/docs/source/tutorials/index.rst
Tutorials ========= Welcome to the Orbit tutorials! These tutorials provide a step-by-step guide to help you understand and use various features of the framework. All the tutorials are written as Python scripts. You can find the source code for each tutorial in the ``source/standalone/tutorials`` directory of the Orbit repository. .. note:: We would love to extend the tutorials to cover more topics and use cases, so please let us know if you have any suggestions. We recommend that you go through the tutorials in the order they are listed here. .. toctree:: :maxdepth: 2 00_sim/index 01_assets/index 02_scene/index 03_envs/index 04_sensors/index 05_controllers/index
714
reStructuredText
27.599999
102
0.736695
NVIDIA-Omniverse/orbit/docs/source/tutorials/01_assets/run_articulation.rst
.. _tutorial-interact-articulation: Interacting with an articulation ================================ .. currentmodule:: omni.isaac.orbit This tutorial shows how to interact with an articulated robot in the simulation. It is a continuation of the :ref:`tutorial-interact-rigid-object` tutorial, where we learned how to interact with a rigid object. On top of setting the root state, we will see how to set the joint state and apply commands to the articulated robot. The Code ~~~~~~~~ The tutorial corresponds to the ``run_articulation.py`` script in the ``orbit/source/standalone/tutorials/01_assets`` directory. .. dropdown:: Code for run_articulation.py :icon: code .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_articulation.py :language: python :emphasize-lines: 60-71, 93-106, 110-113, 118-119 :linenos: The Code Explained ~~~~~~~~~~~~~~~~~~ Designing the scene ------------------- Similar to the previous tutorial, we populate the scene with a ground plane and a distant light. Instead of spawning rigid objects, we now spawn a cart-pole articulation from its USD file. The cart-pole is a simple robot consisting of a cart and a pole attached to it. The cart is free to move along the x-axis, and the pole is free to rotate about the cart. The USD file for the cart-pole contains the robot's geometry, joints, and other physical properties. For the cart-pole, we use its pre-defined configuration object, which is an instance of the :class:`assets.ArticulationCfg` class. This class contains information about the articulation's spawning strategy, default initial state, actuator models for different joints, and other meta-information. A deeper dive into how to create this configuration object is provided in the :ref:`how-to-write-articulation-config` tutorial. As seen in the previous tutorial, we can spawn the articulation into the scene in a similar fashion, by creating an instance of the :class:`assets.Articulation` class and passing the configuration object to its constructor. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_articulation.py :language: python :start-at: # Create separate groups called "Origin1", "Origin2", "Origin3" :end-at: cartpole = Articulation(cfg=cartpole_cfg) Running the simulation loop --------------------------- Continuing from the previous tutorial, we reset the simulation at regular intervals, set commands to the articulation, step the simulation, and update the articulation's internal buffers. Resetting the simulation """""""""""""""""""""""" Similar to a rigid object, an articulation also has a root state. This state corresponds to the root body in the articulation tree. On top of the root state, an articulation also has joint states. These states correspond to the joint positions and velocities. To reset the articulation, we first set the root state by calling the :meth:`Articulation.write_root_state_to_sim` method. Similarly, we set the joint states by calling the :meth:`Articulation.write_joint_state_to_sim` method. Finally, we call the :meth:`Articulation.reset` method to reset any internal buffers and caches. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_articulation.py :language: python :start-at: # reset the scene entities :end-at: robot.reset() Stepping the simulation """"""""""""""""""""""" Applying commands to the articulation involves two steps: 1. *Setting the joint targets*: This sets the desired joint position, velocity, or effort targets for the articulation. 2. 
*Writing the data to the simulation*: Based on the articulation's configuration, this step handles any :ref:`actuation conversions <feature-actuators>` and writes the converted values to the PhysX buffer. In this tutorial, we control the articulation using joint effort commands. For this to work, we need to set the articulation's stiffness and damping parameters to zero. This is done a priori inside the cart-pole's pre-defined configuration object. At every step, we randomly sample joint efforts and set them on the articulation by calling the :meth:`Articulation.set_joint_effort_target` method. After setting the targets, we call the :meth:`Articulation.write_data_to_sim` method to write the data to the PhysX buffer. Finally, we step the simulation. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_articulation.py :language: python :start-at: # Apply random action :end-at: robot.write_data_to_sim() Updating the state """""""""""""""""" Every articulation class contains a :class:`assets.ArticulationData` object. This stores the state of the articulation. To update the state inside the buffer, we call the :meth:`assets.Articulation.update` method. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_articulation.py :language: python :start-at: # Update buffers :end-at: robot.update(sim_dt) The Code Execution ~~~~~~~~~~~~~~~~~~ To run the code and see the results, let's run the script from the terminal: .. code-block:: bash ./orbit.sh -p source/standalone/tutorials/01_assets/run_articulation.py This command should open a stage with a ground plane, lights, and two cart-poles that are moving around randomly. To stop the simulation, you can either close the window, press the ``STOP`` button in the UI, or press ``Ctrl+C`` in the terminal. In this tutorial, we learned how to create and interact with a simple articulation. We saw how to set the state of an articulation (its root and joint state) and how to apply commands to it. We also saw how to update its buffers to read the latest state from the simulation. In addition to this tutorial, we also provide a few other scripts that spawn different robots. These are included in the ``orbit/source/standalone/demos`` directory. You can run these scripts as: .. code-block:: bash # Spawn many different single-arm manipulators ./orbit.sh -p source/standalone/demos/arms.py # Spawn many different quadrupeds ./orbit.sh -p source/standalone/demos/quadrupeds.py
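Condensed into one place, the per-step control logic described above amounts to the following sketch, where ``robot`` is the :class:`assets.Articulation` instance and ``sim`` the simulation context from the tutorial script; the effort magnitude is illustrative.

.. code-block:: python

    import torch

    # sample random joint efforts and set them as targets
    efforts = torch.randn_like(robot.data.joint_pos) * 5.0
    robot.set_joint_effort_target(efforts)
    # write the commands to the simulation and step it
    robot.write_data_to_sim()
    sim.step()
    # refresh the articulation's internal buffers
    robot.update(sim.get_physics_dt())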
6,130
reStructuredText
42.176056
119
0.745351
NVIDIA-Omniverse/orbit/docs/source/tutorials/01_assets/run_rigid_object.rst
.. _tutorial-interact-rigid-object: Interacting with a rigid object =============================== .. currentmodule:: omni.isaac.orbit In the previous tutorials, we learned the essential workings of the standalone script and how to spawn different objects (or *prims*) into the simulation. This tutorial shows how to create and interact with a rigid object. For this, we will use the :class:`assets.RigidObject` class provided in Orbit. The Code ~~~~~~~~ The tutorial corresponds to the ``run_rigid_object.py`` script in the ``orbit/source/standalone/tutorials/01_assets`` directory. .. dropdown:: Code for run_rigid_object.py :icon: code .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :emphasize-lines: 57-76, 78-80, 100-110, 113-114, 120-121, 134-136, 141-142 :linenos: The Code Explained ~~~~~~~~~~~~~~~~~~ In this script, we split the ``main`` function into two separate functions, which highlight the two main steps of setting up any simulation in the simulator: 1. **Design scene**: As the name suggests, this part is responsible for adding all the prims to the scene. 2. **Run simulation**: This part is responsible for stepping the simulator, interacting with the prims in the scene, e.g., changing their poses, and applying any commands to them. A distinction between these two steps is necessary because the second step only happens after the first step is complete and the simulator is reset. Once the simulator is reset (which automatically plays the simulation), no new (physics-enabled) prims should be added to the scene as it may lead to unexpected behaviors. However, the prims can be interacted with through their respective handles. Designing the scene ------------------- Similar to the previous tutorial, we populate the scene with a ground plane and a light source. In addition, we add a rigid object to the scene using the :class:`assets.RigidObject` class. This class is responsible for spawning the prims at the input path and initializing their corresponding rigid body physics handles. In this tutorial, we create a conical rigid object using the spawn configuration similar to the rigid cone in the :ref:`Spawn Objects <tutorial-spawn-prims>` tutorial. The only difference is that now we wrap the spawning configuration into the :class:`assets.RigidObjectCfg` class. This class contains information about the asset's spawning strategy, default initial state, and other meta-information. When this class is passed to the :class:`assets.RigidObject` class, it spawns the object and initializes the corresponding physics handles when the simulation is played. As an example of spawning the rigid object prim multiple times, we create its parent Xform prims, ``/World/Origin{i}``, that correspond to different spawn locations. When the regex expression ``/World/Origin*/Cone`` is passed to the :class:`assets.RigidObject` class, it spawns the rigid object prim at each of the ``/World/Origin{i}`` locations. For instance, if ``/World/Origin1`` and ``/World/Origin2`` are present in the scene, the rigid object prims are spawned at the locations ``/World/Origin1/Cone`` and ``/World/Origin2/Cone`` respectively. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :start-at: # Create separate groups called "Origin1", "Origin2", "Origin3" :end-at: cone_object = RigidObject(cfg=cone_cfg) Since we want to interact with the rigid object, we pass this entity back to the main function. 
This entity is then used to interact with the rigid object in the simulation loop. In later tutorials, we will see a more convenient way to handle multiple scene entities using the :class:`scene.InteractiveScene` class. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :start-at: # return the scene information :end-at: return scene_entities, origins Running the simulation loop --------------------------- We modify the simulation loop to interact with the rigid object to include three steps -- resetting the simulation state at fixed intervals, stepping the simulation, and updating the internal buffers of the rigid object. For the convenience of this tutorial, we extract the rigid object's entity from the scene dictionary and store it in a variable. Resetting the simulation state """""""""""""""""""""""""""""" To reset the simulation state of the spawned rigid object prims, we need to set their pose and velocity. Together they define the root state of the spawned rigid objects. It is important to note that this state is defined in the **simulation world frame**, and not in the frame of their parent Xform prim. This is because the physics engine only understands the world frame and not the parent Xform prim's frame. Thus, we need to transform the desired state of the rigid object prim into the world frame before setting it. We use the :attr:`assets.RigidObject.data.default_root_state` attribute to get the default root state of the spawned rigid object prims. This default state can be configured from the :attr:`assets.RigidObjectCfg.init_state` attribute, which we left as identity in this tutorial. We then randomize the translation of the root state and set the desired state of the rigid object prim using the :meth:`assets.RigidObject.write_root_state_to_sim` method. As the name suggests, this method writes the root state of the rigid object prim into the simulation buffer. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :start-at: # reset root state :end-at: cone_object.reset() Stepping the simulation """"""""""""""""""""""" Before stepping the simulation, we call the :meth:`assets.RigidObject.write_data_to_sim` method. This method writes other data, such as external forces, into the simulation buffer. In this tutorial, we do not apply any external forces to the rigid object, so this method is not necessary. However, it is included for completeness. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :start-at: # apply sim data :end-at: cone_object.write_data_to_sim() Updating the state """""""""""""""""" After stepping the simulation, we update the internal buffers of the rigid object prims to reflect their new state inside the :class:`assets.RigidObject.data` attribute. This is done using the :meth:`assets.RigidObject.update` method. .. literalinclude:: ../../../../source/standalone/tutorials/01_assets/run_rigid_object.py :language: python :start-at: # update buffers :end-at: cone_object.update(sim_dt) The Code Execution ~~~~~~~~~~~~~~~~~~ Now that we have gone through the code, let's run the script and see the result: .. code-block:: bash ./orbit.sh -p source/standalone/tutorials/01_assets/run_rigid_object.py This should open a stage with a ground plane, lights, and several green cones. The cones should drop from a random height and settle onto the ground. 
To stop the simulation, you can close the window, press the ``STOP`` button in the UI, or press ``Ctrl+C`` in the terminal. This tutorial showed how to spawn rigid objects and wrap them in a :class:`RigidObject` class to initialize their physics handles, which allows setting and obtaining their state. In the next tutorial, we will see how to interact with an articulated object, which is a collection of rigid objects connected by joints.
7,574
reStructuredText
50.182432
128
0.750594
NVIDIA-Omniverse/orbit/docs/source/tutorials/01_assets/index.rst
Interacting with Assets ======================= Once objects have been spawned in the scene, these tutorials show you how to create physics handles for them and interact with them. These revolve around the :class:`~omni.isaac.orbit.assets.AssetBase` class and its derivatives such as :class:`~omni.isaac.orbit.assets.RigidObject` and :class:`~omni.isaac.orbit.assets.Articulation`. .. toctree:: :maxdepth: 1 :titlesonly: run_rigid_object run_articulation
475
reStructuredText
30.733331
101
0.726316
NVIDIA-Omniverse/orbit/docs/source/tutorials/02_scene/create_scene.rst
.. _tutorial-interactive-scene: Using the Interactive Scene =========================== .. currentmodule:: omni.isaac.orbit So far in the tutorials, we manually spawned assets into the simulation and created object instances to interact with them. However, as the complexity of the scene increases, it becomes tedious to perform these tasks manually. In this tutorial, we will introduce the :class:`scene.InteractiveScene` class, which provides a convenient interface for spawning prims and managing them in the simulation. At a high level, the interactive scene is a collection of scene entities. Each entity can be either a non-interactive prim (e.g. ground plane, light source), an interactive prim (e.g. articulation, rigid object), or a sensor (e.g. camera, lidar). The interactive scene provides a convenient interface for spawning these entities and managing them in the simulation. Compared to the manual approach, it provides the following benefits: * Alleviates the need for the user to spawn each asset separately, as this is handled implicitly. * Enables user-friendly cloning of scene prims for multiple environments. * Collects all the scene entities into a single object, which makes them easier to manage. In this tutorial, we take the cartpole example from the :ref:`tutorial-interact-articulation` tutorial and replace the ``design_scene`` function with an :class:`scene.InteractiveScene` object. While it may seem like overkill to use the interactive scene for this simple example, it will become more useful in the future as more assets and sensors are added to the scene. The Code ~~~~~~~~ This tutorial corresponds to the ``create_scene.py`` script within ``orbit/source/standalone/tutorials/02_scene``. .. dropdown:: Code for create_scene.py :icon: code .. literalinclude:: ../../../../source/standalone/tutorials/02_scene/create_scene.py :language: python :emphasize-lines: 52-65, 70-72, 93-94, 101-102, 107-108, 118-120 :linenos: The Code Explained ~~~~~~~~~~~~~~~~~~ While the code is similar to the previous tutorial, there are a few key differences that we will go over in detail. Scene configuration ------------------- The scene is composed of a collection of entities, each with their own configuration. These are specified in a configuration class that inherits from :class:`scene.InteractiveSceneCfg`. The configuration class is then passed to the :class:`scene.InteractiveScene` constructor to create the scene. For the cartpole example, we specify the same scene as in the previous tutorial, but list them now in the configuration class :class:`CartpoleSceneCfg` instead of manually spawning them. .. literalinclude:: ../../../../source/standalone/tutorials/02_scene/create_scene.py :language: python :pyobject: CartpoleSceneCfg The variable names in the configuration class are used as keys to access the corresponding entity from the :class:`scene.InteractiveScene` object. For example, the cartpole can be accessed via ``scene["cartpole"]``. However, we will get to that later. First, let's look at how individual scene entities are configured. Similar to how a rigid object and articulation were configured in the previous tutorials, the configurations are specified using a configuration class. However, there is a key difference between the configurations for the ground plane and light source and the configuration for the cartpole. The ground plane and light source are non-interactive prims, while the cartpole is an interactive prim. This distinction is reflected in the configuration classes used to specify them. 
The configurations for the ground plane and light source are specified using an instance of the :class:`assets.AssetBaseCfg` class, while the cartpole is configured using an instance of the :class:`assets.ArticulationCfg`. Anything that is not an interactive prim (i.e., neither an asset nor a sensor) is not *handled* by the scene during simulation steps. Another key difference to note is in the specification of the prim paths for the different prims: * Ground plane: ``/World/defaultGroundPlane`` * Light source: ``/World/Light`` * Cartpole: ``{ENV_REGEX_NS}/Robot`` As we learned earlier, Omniverse creates a graph of prims in the USD stage. The prim paths are used to specify the location of the prim in the graph. The ground plane and light source are specified using absolute paths, while the cartpole is specified using a relative path. The relative path is specified using the ``ENV_REGEX_NS`` variable, which is a special variable that is replaced with the environment name during scene creation. Any entity that has the ``ENV_REGEX_NS`` variable in its prim path will be cloned for each environment. This path is replaced by the scene object with ``/World/envs/env_{i}`` where ``i`` is the environment index. Scene instantiation ------------------- Unlike before, where we called the ``design_scene`` function to create the scene, we now create an instance of the :class:`scene.InteractiveScene` class and pass in the configuration object to its constructor. While creating the configuration instance of ``CartpoleSceneCfg``, we specify how many environment copies we want to create using the ``num_envs`` argument. This will be used to clone the scene for each environment. .. literalinclude:: ../../../../source/standalone/tutorials/02_scene/create_scene.py :language: python :start-at: # Design scene :end-at: scene = InteractiveScene(scene_cfg) Accessing scene elements ------------------------ Similar to how entities were accessed from a dictionary in the previous tutorials, the scene elements can be accessed from the :class:`InteractiveScene` object using the ``[]`` operator. The operator takes in a string key and returns the corresponding entity. The key is specified through the configuration class for each entity. For example, the cartpole is specified using the key ``"cartpole"`` in the configuration class. .. literalinclude:: ../../../../source/standalone/tutorials/02_scene/create_scene.py :language: python :start-at: # Extract scene entities :end-at: robot = scene["cartpole"] Running the simulation loop --------------------------- The rest of the script looks similar to previous scripts that interfaced with :class:`assets.Articulation`, with a few small differences in the methods called: * :meth:`assets.Articulation.reset` ⟶ :meth:`scene.InteractiveScene.reset` * :meth:`assets.Articulation.write_data_to_sim` ⟶ :meth:`scene.InteractiveScene.write_data_to_sim` * :meth:`assets.Articulation.update` ⟶ :meth:`scene.InteractiveScene.update` Under the hood, the methods of :class:`scene.InteractiveScene` call the corresponding methods of the entities in the scene. The Code Execution ~~~~~~~~~~~~~~~~~~ Let's run the script to simulate 32 cartpoles in the scene. We can do this by passing the ``--num_envs`` argument to the script. .. code-block:: bash ./orbit.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 This should open a stage with 32 cartpoles swinging around randomly. You can use the mouse to rotate the camera and the arrow keys to move around the scene. 
The Code Execution
~~~~~~~~~~~~~~~~~~

Let's run the script to simulate 32 cartpoles in the scene. We can do this by passing the ``--num_envs``
argument to the script.

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32

This should open a stage with 32 cartpoles swinging around randomly. You can use the mouse to rotate the
camera and the arrow keys to move around the scene.

In this tutorial, we saw how to use :class:`scene.InteractiveScene` to create a scene with multiple assets.
We also saw how to use the ``num_envs`` argument to clone the scene for multiple environments.

There are many more example usages of the :class:`scene.InteractiveSceneCfg` in the tasks found under the
``omni.isaac.orbit_tasks`` extension. Please check out the source code to see how they are used for more
complex scenes.
7,600
reStructuredText
45.919753
107
0.761974
NVIDIA-Omniverse/orbit/docs/source/tutorials/02_scene/index.rst
Creating a Scene
================

With the basic concepts of the framework covered, the tutorials move to a more intuitive scene interface that
uses the :class:`~omni.isaac.orbit.scene.InteractiveScene` class. This class provides a higher level
abstraction for creating scenes easily.

.. toctree::
   :maxdepth: 1
   :titlesonly:

   create_scene
352
reStructuredText
26.153844
94
0.730114
NVIDIA-Omniverse/orbit/docs/source/tutorials/03_envs/run_rl_training.rst
Training with an RL Agent
=========================

.. currentmodule:: omni.isaac.orbit

In the previous tutorials, we covered how to define an RL task environment, register it into the ``gym``
registry, and interact with it using a random agent. We now move on to the next step: training an RL agent to
solve the task.

Although the :class:`envs.RLTaskEnv` conforms to the :class:`gymnasium.Env` interface, it is not exactly a
``gym`` environment. The inputs and outputs of the environment are not numpy arrays, but rather based on
torch tensors with the first dimension being the number of environment instances.

Additionally, most RL libraries expect their own variation of an environment interface. For example,
`Stable-Baselines3`_ expects the environment to conform to its `VecEnv API`_ which expects a list of numpy
arrays instead of a single tensor. Similarly, `RSL-RL`_ and `RL-Games`_ expect a different interface. Since
there is no one-size-fits-all solution, we do not base the :class:`envs.RLTaskEnv` on any particular learning
library. Instead, we implement wrappers to convert the environment into the expected interface. These are
specified in the :mod:`omni.isaac.orbit_tasks.utils.wrappers` module.

In this tutorial, we will use `Stable-Baselines3`_ to train an RL agent to solve the cartpole balancing task.

.. caution::

   Wrapping the environment with the respective learning framework's wrapper should happen in the end, i.e.
   after all other wrappers have been applied. This is because the learning framework's wrapper modifies the
   interpretation of the environment's APIs which may no longer be compatible with :class:`gymnasium.Env`.

The Code
--------

For this tutorial, we use the training script from the `Stable-Baselines3`_ workflow in the
``orbit/source/standalone/workflows/sb3`` directory.

.. dropdown:: Code for train.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/workflows/sb3/train.py
      :language: python
      :emphasize-lines: 58, 61, 67-69, 78, 92-96, 98-99, 102-110, 112, 117-125, 127-128, 135-138
      :linenos:

The Code Explained
------------------

.. currentmodule:: omni.isaac.orbit_tasks.utils

Most of the code above is boilerplate code to create logging directories, save the parsed configurations, and
set up different Stable-Baselines3 components. For this tutorial, the important part is creating the
environment and wrapping it with the Stable-Baselines3 wrapper.

There are three wrappers used in the code above:

1. :class:`gymnasium.wrappers.RecordVideo`: This wrapper records a video of the environment and saves it to
   the specified directory. This is useful for visualizing the agent's behavior during training.
2. :class:`wrappers.sb3.Sb3VecEnvWrapper`: This wrapper converts the environment into a Stable-Baselines3
   compatible environment.
3. `stable_baselines3.common.vec_env.VecNormalize`_: This wrapper normalizes the environment's observations
   and rewards.

Each of these wrappers wraps around the previous one, following the pattern
``env = wrapper(env, *args, **kwargs)``. The final environment is then used to train the agent. For more
information on how these wrappers work, please refer to the :ref:`how-to-env-wrappers` documentation.
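Condensed, this chaining might look like the following sketch. It is illustrative rather than a copy of the
training script: ``env_cfg`` is assumed to be a parsed environment configuration, and the constructor
arguments are abbreviated.

.. code-block:: python

   import gymnasium as gym
   from stable_baselines3.common.vec_env import VecNormalize

   from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

   # each wrapper wraps the previous one: env = wrapper(env, *args, **kwargs)
   env = gym.make("Isaac-Cartpole-v0", cfg=env_cfg)            # assumes env_cfg was parsed beforehand
   env = gym.wrappers.RecordVideo(env, video_folder="videos")  # record videos of the rollouts
   env = Sb3VecEnvWrapper(env)                                 # convert to the SB3 VecEnv interface
   env = VecNormalize(env, norm_obs=True, norm_reward=True)    # normalize observations and rewards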
The Code Execution
------------------

We train a PPO agent from Stable-Baselines3 to solve the cartpole balancing task.

Training the agent
~~~~~~~~~~~~~~~~~~

There are three main ways to train the agent. Each of them has its own advantages and disadvantages. It is up
to you to decide which one you prefer based on your use case.

Headless execution
""""""""""""""""""

If the ``--headless`` flag is set, the simulation is not rendered during training. This is useful when
training on a remote server or when you do not want to see the simulation. Typically, it speeds up the
training process since only the physics simulation step is performed.

.. code-block:: bash

   ./orbit.sh -p source/standalone/workflows/sb3/train.py --task Isaac-Cartpole-v0 --num_envs 64 --headless

Headless execution with off-screen render
"""""""""""""""""""""""""""""""""""""""""

Since the above command does not render the simulation, it is not possible to visualize the agent's behavior
during training. To visualize the agent's behavior, we pass the ``--offscreen_render`` flag, which enables
off-screen rendering. Additionally, we pass the flag ``--video`` which records a video of the agent's
behavior during training.

.. code-block:: bash

   ./orbit.sh -p source/standalone/workflows/sb3/train.py --task Isaac-Cartpole-v0 --num_envs 64 --headless --offscreen_render --video

The videos are saved to the ``logs/sb3/Isaac-Cartpole-v0/<run-dir>/videos`` directory. You can open these
videos using any video player.

Interactive execution
"""""""""""""""""""""

.. currentmodule:: omni.isaac.orbit

While the above two methods are useful for training the agent, they don't allow you to interact with the
simulation to see what is happening. In this case, you can omit the ``--headless`` flag and run the training
script as follows:

.. code-block:: bash

   ./orbit.sh -p source/standalone/workflows/sb3/train.py --task Isaac-Cartpole-v0 --num_envs 64

This will open the Isaac Sim window and you can see the agent training in the environment. However, this will
slow down the training process since the simulation is rendered on the screen. As a workaround, you can
switch between different render modes in the ``"Orbit"`` window that is docked on the bottom-right corner of
the screen. To learn more about these render modes, please check the
:class:`sim.SimulationContext.RenderMode` class.

Viewing the logs
~~~~~~~~~~~~~~~~

On a separate terminal, you can monitor the training progress by executing the following command:

.. code:: bash

   # execute from the root directory of the repository
   ./orbit.sh -p -m tensorboard.main --logdir logs/sb3/Isaac-Cartpole-v0

Playing the trained agent
~~~~~~~~~~~~~~~~~~~~~~~~~

Once the training is complete, you can visualize the trained agent by executing the following command:

.. code:: bash

   # execute from the root directory of the repository
   ./orbit.sh -p source/standalone/workflows/sb3/play.py --task Isaac-Cartpole-v0 --num_envs 32 --use_last_checkpoint

The above command will load the latest checkpoint from the ``logs/sb3/Isaac-Cartpole-v0`` directory. You can
also specify a specific checkpoint by passing the ``--checkpoint`` flag.

.. _Stable-Baselines3: https://stable-baselines3.readthedocs.io/en/master/
.. _VecEnv API: https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecenv-api-vs-gym-api
.. _`stable_baselines3.common.vec_env.VecNormalize`: https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize
.. _RL-Games: https://github.com/Denys88/rl_games
.. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
6,871
reStructuredText
43.623376
136
0.747781
NVIDIA-Omniverse/orbit/docs/source/tutorials/03_envs/create_rl_env.rst
.. _tutorial-create-rl-env:

Creating an RL Environment
==========================

.. currentmodule:: omni.isaac.orbit

Having learned how to create a base environment in :ref:`tutorial-create-base-env`, we will now look at how to
create a task environment for reinforcement learning.

The base environment is designed as a sense-act environment where the agent can send commands to the
environment and receive observations from the environment. This minimal interface is sufficient for many
applications such as traditional motion planning and controls. However, many applications require a task
specification which often serves as the learning objective for the agent. For instance, in a navigation task,
the agent may be required to reach a goal location. To this end, we use the :class:`envs.RLTaskEnv` class
which extends the base environment to include a task specification.

Similar to other components in Orbit, instead of directly modifying the base class :class:`RLTaskEnv`, we
encourage users to simply implement a configuration :class:`RLTaskEnvCfg` for their task environment. This
practice allows us to separate the task specification from the environment implementation, making it easier
to reuse components of the same environment for different tasks.

In this tutorial, we will configure the cartpole environment using the :class:`RLTaskEnvCfg` to create a task
for balancing the pole upright. We will learn how to specify the task using reward terms, termination
criteria, curriculum and commands.

The Code
~~~~~~~~

For this tutorial, we use the cartpole environment defined in the ``omni.isaac.orbit_tasks.classic.cartpole``
module.

.. dropdown:: Code for cartpole_env_cfg.py
   :icon: code

   .. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
      :language: python
      :emphasize-lines: 63-68, 124-149, 152-162, 165-169, 187-192
      :linenos:

The script for running the environment ``run_cartpole_rl_env.py`` is present in the
``orbit/source/standalone/tutorials/03_envs`` directory. The script is similar to the
``cartpole_base_env.py`` script in the previous tutorial, except that it uses the :class:`envs.RLTaskEnv`
instead of the :class:`envs.BaseEnv`.

.. dropdown:: Code for run_cartpole_rl_env.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
      :language: python
      :emphasize-lines: 43-47, 61-62
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

We already went through parts of the above in the :ref:`tutorial-create-base-env` tutorial to learn about how
to specify the scene, observations, actions and events. Thus, in this tutorial, we will focus only on the RL
components of the environment.

In Orbit, we provide various implementations of different terms in the :mod:`envs.mdp` module. We will use
some of these terms in this tutorial, but users are free to define their own terms as well. These are usually
placed in their task-specific sub-package (for instance, in
:mod:`omni.isaac.orbit_tasks.classic.cartpole.mdp`).

Defining rewards
----------------

The :class:`managers.RewardManager` is used to compute the reward terms for the agent. Similar to the other
managers, its terms are configured using the :class:`managers.RewardTermCfg` class. The
:class:`managers.RewardTermCfg` class specifies the function or callable class that computes the reward as
well as the weighting associated with it. It also takes in a dictionary of arguments, ``"params"``, that are
passed to the reward function when it is called.
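For illustration, a single reward term might be defined as in the following sketch. The function and joint
name are assumptions based on the cartpole example rather than a verbatim excerpt.

.. code-block:: python

   import omni.isaac.orbit_tasks.classic.cartpole.mdp as mdp
   from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
   from omni.isaac.orbit.managers import SceneEntityCfg

   # penalize the pole's deviation from the upright position (a negative weight makes it a penalty)
   pole_pos = RewTerm(
       func=mdp.joint_pos_target_l2,
       weight=-1.0,
       params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), "target": 0.0},
   )

The total reward at each step is the weighted sum of all configured terms, so the sign and magnitude of
``weight`` determine how strongly each term shapes the behavior.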
For the cartpole task, we will use the following reward terms:

* **Alive Reward**: Encourage the agent to stay alive for as long as possible.
* **Terminating Reward**: Similarly, penalize the agent for terminating.
* **Pole Angle Reward**: Encourage the agent to keep the pole at the desired upright position.
* **Cart Velocity Reward**: Encourage the agent to keep the cart velocity as small as possible.
* **Pole Velocity Reward**: Encourage the agent to keep the pole velocity as small as possible.

.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
   :language: python
   :pyobject: RewardsCfg

Defining termination criteria
-----------------------------

Most learning tasks happen over a finite number of steps that we call an episode. For instance, in the
cartpole task, we want the agent to balance the pole for as long as possible. However, if the agent reaches
an unstable or unsafe state, we want to terminate the episode. On the other hand, if the agent is able to
balance the pole for a long time, we want to terminate the episode and start a new one so that the agent can
learn to balance the pole from a different starting configuration.

The :class:`managers.TerminationsCfg` configures what constitutes the end of an episode. In this example, we
want the task to terminate when either of the following conditions is met:

* **Episode Length**: The episode length is greater than the defined ``max_episode_length``.
* **Cart out of bounds**: The cart goes outside of the bounds ``[-3, 3]``.

The flag :attr:`managers.TerminationsCfg.time_out` specifies whether the term is a time-out (truncation) term
or a termination term. These are used to indicate the two types of terminations as described in `Gymnasium's
documentation <https://gymnasium.farama.org/tutorials/gymnasium_basics/handling_time_limits/>`_.

.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
   :language: python
   :pyobject: TerminationsCfg

Defining commands
-----------------

For various goal-conditioned tasks, it is useful to specify the goals or commands for the agent. These are
handled through the :class:`managers.CommandManager`. The command manager handles resampling and updating the
commands at each step. It can also be used to provide the commands as an observation to the agent.

For this simple task, we do not use any commands. This is specified by using a command term with the
:class:`envs.mdp.NullCommandCfg` configuration. However, you can see an example of command definitions in the
locomotion or manipulation tasks.

.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
   :language: python
   :pyobject: CommandsCfg

Defining curriculum
-------------------

Oftentimes, when training a learning agent, it helps to start with a simple task and gradually increase the
task's difficulty as the agent training progresses. This is the idea behind curriculum learning. In Orbit, we
provide a :class:`managers.CurriculumManager` class that can be used to define a curriculum for your
environment.

In this tutorial, we don't implement a curriculum for simplicity, but you can see an example of a curriculum
definition in the other locomotion or manipulation tasks. We use a simple pass-through curriculum to define a
curriculum manager that does not modify the environment.
.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
   :language: python
   :pyobject: CurriculumCfg

Tying it all together
---------------------

With all the above components defined, we can now create the :class:`RLTaskEnvCfg` configuration for the
cartpole environment. This is similar to the :class:`BaseEnvCfg` defined in :ref:`tutorial-create-base-env`,
only with the added RL components explained in the above sections.

.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/cartpole_env_cfg.py
   :language: python
   :pyobject: CartpoleEnvCfg

Running the simulation loop
---------------------------

Coming back to the ``run_cartpole_rl_env.py`` script, the simulation loop is similar to the previous
tutorial. The only difference is that we create an instance of :class:`envs.RLTaskEnv` instead of the
:class:`envs.BaseEnv`. Consequently, now the :meth:`envs.RLTaskEnv.step` method returns additional signals
such as the reward and termination status. The information dictionary also maintains logging of quantities
such as the reward contribution from individual terms, the termination status of each term, the episode
length, etc.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
   :language: python
   :pyobject: main

The Code Execution
~~~~~~~~~~~~~~~~~~

Similar to the previous tutorial, we can run the environment by executing the ``run_cartpole_rl_env.py``
script.

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/03_envs/run_cartpole_rl_env.py --num_envs 32

This should open a similar simulation as in the previous tutorial. However, this time, the environment
returns more signals that specify the reward and termination status. Additionally, the individual
environments reset themselves when they terminate based on the termination criteria specified in the
configuration.

To stop the simulation, you can either close the window, or press ``Ctrl+C`` in the terminal where you
started the simulation.

In this tutorial, we learned how to create a task environment for reinforcement learning. We did this by
extending the base environment to include the rewards, terminations, commands and curriculum terms. We also
learned how to use the :class:`envs.RLTaskEnv` class to run the environment and receive various signals from
it.

While it is possible to manually create an instance of the :class:`envs.RLTaskEnv` class for a desired task,
this is not scalable as it requires specialized scripts for each task. Thus, we exploit the
:meth:`gymnasium.make` function to create the environment with the gym interface. We will learn how to do
this in the next tutorial.
9,925
reStructuredText
49.902564
135
0.764131
NVIDIA-Omniverse/orbit/docs/source/tutorials/03_envs/create_base_env.rst
.. _tutorial-create-base-env:

Creating a Base Environment
===========================

.. currentmodule:: omni.isaac.orbit

Environments bring together different aspects of the simulation, such as the scene, the observation and
action spaces, and reset events, to create a coherent interface for various applications. In Orbit,
environments are implemented as :class:`envs.BaseEnv` and :class:`envs.RLTaskEnv` classes. The two classes
are very similar, but :class:`envs.RLTaskEnv` is useful for reinforcement learning tasks and contains
rewards, terminations, curriculum and command generation. The :class:`envs.BaseEnv` class is useful for
traditional robot control and doesn't contain rewards and terminations.

In this tutorial, we will look at the base class :class:`envs.BaseEnv` and its corresponding configuration
class :class:`envs.BaseEnvCfg`. We will use the cartpole environment from earlier to illustrate the different
components in creating a new :class:`envs.BaseEnv` environment.

The Code
~~~~~~~~

The tutorial corresponds to the ``create_cartpole_base_env.py`` script in the
``orbit/source/standalone/tutorials/03_envs`` directory.

.. dropdown:: Code for create_cartpole_base_env.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
      :language: python
      :emphasize-lines: 49-53, 56-73, 76-109, 112-131, 136-140, 145, 149, 154-155, 161-162
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

The base class :class:`envs.BaseEnv` wraps around many intricacies of the simulation interaction and provides
a simple interface for the user to run the simulation and interact with it. It is composed of the following
components:

* :class:`scene.InteractiveScene` - The scene that is used for the simulation.
* :class:`managers.ActionManager` - The manager that handles actions.
* :class:`managers.ObservationManager` - The manager that handles observations.
* :class:`managers.EventManager` - The manager that schedules operations (such as domain randomization) at
  specified simulation events. For instance, at startup, on resets, or periodic intervals.

By configuring these components, the user can create different variations of the same environment with
minimal effort. In this tutorial, we will go through the different components of the :class:`envs.BaseEnv`
class and how to configure them to create a new environment.

Designing the scene
-------------------

The first step in creating a new environment is to configure its scene. For the cartpole environment, we will
be using the scene from the previous tutorial. Thus, we omit the scene configuration here. For more details
on how to configure a scene, see :ref:`tutorial-interactive-scene`.

Defining actions
----------------

In the previous tutorial, we directly input the action to the cartpole using the
:meth:`assets.Articulation.set_joint_effort_target` method. In this tutorial, we will use the
:class:`managers.ActionManager` to handle the actions.

The action manager can comprise multiple :class:`managers.ActionTerm` instances. Each action term is
responsible for applying *control* over a specific aspect of the environment. For instance, for a robotic
arm, we can have two action terms -- one for controlling the joints of the arm, and the other for controlling
the gripper. This composition allows the user to define different control schemes for different aspects of
the environment.

In the cartpole environment, we want to control the force applied to the cart to balance the pole.
Thus, we will create an action term that controls the force applied to the cart.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
   :language: python
   :pyobject: ActionsCfg

Defining observations
---------------------

While the scene defines the state of the environment, the observations define the states that are observable
by the agent. These observations are used by the agent to make decisions on what actions to take. In Orbit,
the observations are computed by the :class:`managers.ObservationManager` class.

Similar to the action manager, the observation manager can comprise multiple observation terms. These are
further grouped into observation groups which are used to define different observation spaces for the
environment. For instance, for hierarchical control, we may want to define two observation groups -- one for
the low level controller and the other for the high level controller. It is assumed that all the observation
terms in a group have the same dimensions.

For this tutorial, we will only define one observation group named ``"policy"``. While not completely
prescriptive, this group is a necessary requirement for various wrappers in Orbit. We define a group by
inheriting from the :class:`managers.ObservationGroupCfg` class. This class collects different observation
terms and helps define common properties for the group, such as enabling noise corruption or concatenating
the observations into a single tensor.

The individual terms are defined by inheriting from the :class:`managers.ObservationTermCfg` class. This
class takes in the :attr:`managers.ObservationTermCfg.func` that specifies the function or callable class
that computes the observation for that term. It includes other parameters for defining the noise model,
clipping, scaling, etc. However, we leave these parameters to their default values for this tutorial.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
   :language: python
   :pyobject: ObservationsCfg

Defining events
---------------

At this point, we have defined the scene, actions and observations for the cartpole environment. The general
idea for all these components is to define the configuration classes and then pass them to the corresponding
managers. The event manager is no different.

The :class:`managers.EventManager` class is responsible for events corresponding to changes in the simulation
state. This includes resetting (or randomizing) the scene, randomizing physical properties (such as mass,
friction, etc.), and varying visual properties (such as colors, textures, etc.). Each of these are specified
through the :class:`managers.EventTermCfg` class, which takes in the :attr:`managers.EventTermCfg.func` that
specifies the function or callable class that performs the event.

Additionally, it expects the **mode** of the event. The mode specifies when the event term should be applied.
It is possible to specify your own mode. For this, you'll need to adapt the :class:`~envs.BaseEnv` class.
However, out of the box, Orbit provides three commonly used modes:

* ``"startup"`` - Event that takes place only once at environment startup.
* ``"reset"`` - Event that occurs on environment termination and reset.
* ``"interval"`` - Event that is executed at a given interval, i.e., periodically after a certain number of
  steps.

For this example, we define events that randomize the pole's mass on startup. This is done only once since
this operation is expensive and we don't want to do it on every reset. We also create an event to randomize
the initial joint state of the cart and the pole at every reset.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
   :language: python
   :pyobject: EventCfg
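For illustration, a reset-mode event term might look like the following sketch. The function and joint names
are assumptions based on the cartpole example rather than a verbatim excerpt.

.. code-block:: python

   import omni.isaac.orbit.envs.mdp as mdp
   from omni.isaac.orbit.managers import EventTermCfg as EventTerm
   from omni.isaac.orbit.managers import SceneEntityCfg

   # on every reset, sample a new cart position and velocity within the given ranges
   reset_cart_position = EventTerm(
       func=mdp.reset_joints_by_offset,
       mode="reset",
       params={
           "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]),
           "position_range": (-1.0, 1.0),
           "velocity_range": (-0.5, 0.5),
       },
   )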
Tying it all together
---------------------

Having defined the scene and manager configurations, we can now define the environment configuration through
the :class:`envs.BaseEnvCfg` class. This class takes in the scene, action, observation and event
configurations.

In addition to these, it also takes in the :attr:`envs.BaseEnvCfg.sim` which defines the simulation
parameters such as the timestep, gravity, etc. This is initialized to the default values, but can be modified
as needed. We recommend doing so by defining the :meth:`__post_init__` method in the :class:`envs.BaseEnvCfg`
class, which is called after the configuration is initialized.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
   :language: python
   :pyobject: CartpoleEnvCfg

Running the simulation
----------------------

Lastly, we revisit the simulation execution loop. This is now much simpler since we have abstracted away most
of the details into the environment configuration. We only need to call the :meth:`envs.BaseEnv.reset` method
to reset the environment and the :meth:`envs.BaseEnv.step` method to step the environment. Both of these
functions return the observation and an info dictionary which may contain additional information provided by
the environment. These can be used by an agent for decision-making.

The :class:`envs.BaseEnv` class does not have any notion of terminations since that concept is specific to
episodic tasks. Thus, the user is responsible for defining the termination condition for the environment. In
this tutorial, we reset the simulation at regular intervals.

.. literalinclude:: ../../../../source/standalone/tutorials/03_envs/create_cartpole_base_env.py
   :language: python
   :pyobject: main

An important thing to note above is that the entire simulation loop is wrapped inside the
:meth:`torch.inference_mode` context manager. This is because the environment uses PyTorch operations under
the hood and we want to ensure that the simulation is not slowed down by the overhead of PyTorch's autograd
engine and that gradients are not computed for the simulation operations.

The Code Execution
~~~~~~~~~~~~~~~~~~

To run the base environment made in this tutorial, you can use the following command:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/03_envs/create_cartpole_base_env.py --num_envs 32

This should open a stage with a ground plane, light source, and cartpoles. The simulation should be playing
with random actions on the cartpole. Additionally, it opens a UI window on the bottom right corner of the
screen named ``"Orbit"``. This window contains different UI elements that can be used for debugging and
visualization.

To stop the simulation, you can either close the window, or press ``Ctrl+C`` in the terminal where you
started the simulation.

In this tutorial, we learned about the different managers that help define a base environment. We include
more examples of defining the base environment in the ``orbit/source/standalone/tutorials/03_envs``
directory. For completeness, they can be run using the following commands:
.. code-block:: bash

   # Floating cube environment with custom action term for PD control
   ./orbit.sh -p source/standalone/tutorials/03_envs/create_cube_base_env.py --num_envs 32

   # Quadrupedal locomotion environment with a policy that interacts with the environment
   ./orbit.sh -p source/standalone/tutorials/03_envs/create_quadruped_base_env.py --num_envs 32

In the following tutorial, we will look at the :class:`envs.RLTaskEnv` class and how to use it to create a
Markov Decision Process (MDP).
11,020
reStructuredText
49.555046
121
0.773775
NVIDIA-Omniverse/orbit/docs/source/tutorials/03_envs/register_rl_env_gym.rst
Registering an Environment
==========================

.. currentmodule:: omni.isaac.orbit

In the previous tutorial, we learned how to create a custom cartpole environment. We manually created an
instance of the environment by importing the environment class and its configuration class.

.. dropdown:: Environment creation in the previous tutorial
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
      :language: python
      :start-at: # create environment configuration
      :end-at: env = RLTaskEnv(cfg=env_cfg)

While straightforward, this approach is not scalable as we have a large suite of environments. In this
tutorial, we will show how to use the :meth:`gymnasium.register` method to register environments with the
``gymnasium`` registry. This allows us to create the environment through the :meth:`gymnasium.make` function.

.. dropdown:: Environment creation in this tutorial
   :icon: code

   .. literalinclude:: ../../../../source/standalone/environments/random_agent.py
      :language: python
      :lines: 40-51

The Code
~~~~~~~~

The tutorial corresponds to the ``random_agent.py`` script in the ``orbit/source/standalone/environments``
directory.

.. dropdown:: Code for random_agent.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/environments/random_agent.py
      :language: python
      :emphasize-lines: 39-41, 46-51
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

The :class:`envs.RLTaskEnv` class inherits from the :class:`gymnasium.Env` class to follow a standard
interface. However, unlike the traditional Gym environments, the :class:`envs.RLTaskEnv` implements a
*vectorized* environment. This means that multiple environment instances are running simultaneously in the
same process, and all the data is returned in a batched fashion.

Using the gym registry
----------------------

To register an environment, we use the :meth:`gymnasium.register` method. This method takes in the
environment name, the entry point to the environment class, and the entry point to the environment
configuration class. For the cartpole environment, the following shows the registration call in the
``omni.isaac.orbit_tasks.classic.cartpole`` sub-package:

.. literalinclude:: ../../../../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/cartpole/__init__.py
   :language: python
   :lines: 10-
   :emphasize-lines: 11, 12, 15

The ``id`` argument is the name of the environment. As a convention, we name all the environments with the
prefix ``Isaac-`` to make it easier to search for them in the registry. The name of the environment is
typically followed by the name of the task, and then the name of the robot. For instance, for legged
locomotion with ANYmal C on flat terrain, the environment is called ``Isaac-Velocity-Flat-Anymal-C-v0``. The
version number ``v<N>`` is typically used to specify different variations of the same environment. Otherwise,
the names of the environments can become too long and difficult to read.

The ``entry_point`` argument is the entry point to the environment class. The entry point is a string of the
form ``<module>:<class>``. In the case of the cartpole environment, the entry point is
``omni.isaac.orbit.envs:RLTaskEnv``. The entry point is used to import the environment class when creating
the environment instance.

The ``env_cfg_entry_point`` argument specifies the default configuration for the environment. The default
configuration is loaded using the :meth:`omni.isaac.orbit_tasks.utils.parse_env_cfg` function.
It is then passed to the :meth:`gymnasium.make` function to create the environment instance. The
configuration entry point can be either a YAML file or a Python configuration class.

.. note::

   The ``gymnasium`` registry is a global registry. Hence, it is important to ensure that the environment
   names are unique. Otherwise, the registry will throw an error when registering the environment.

Creating the environment
------------------------

To inform the ``gym`` registry about all the environments provided by the ``omni.isaac.orbit_tasks``
extension, we must import the module at the start of the script. This will execute the ``__init__.py`` file
which iterates over all the sub-packages and registers their respective environments.

.. literalinclude:: ../../../../source/standalone/environments/random_agent.py
   :language: python
   :start-at: import omni.isaac.orbit_tasks  # noqa: F401
   :end-at: import omni.isaac.orbit_tasks  # noqa: F401

In this tutorial, the task name is read from the command line. The task name is used to parse the default
configuration as well as to create the environment instance. In addition, other parsed command line arguments
such as the number of environments, the simulation device, and whether to render, are used to override the
default configuration.

.. literalinclude:: ../../../../source/standalone/environments/random_agent.py
   :language: python
   :start-at: # create environment configuration
   :end-at: env = gym.make(args_cli.task, cfg=env_cfg)

Once the environment is created, the rest of the execution follows the standard pattern of resetting and
stepping.

The Code Execution
~~~~~~~~~~~~~~~~~~

Now that we have gone through the code, let's run the script and see the result:

.. code-block:: bash

   ./orbit.sh -p source/standalone/environments/random_agent.py --task Isaac-Cartpole-v0 --num_envs 32

This should open a stage with everything similar to the previous :ref:`tutorial-create-rl-env` tutorial. To
stop the simulation, you can either close the window, or press ``Ctrl+C`` in the terminal.

In addition, you can also change the simulation device from GPU to CPU by adding the ``--cpu`` flag:

.. code-block:: bash

   ./orbit.sh -p source/standalone/environments/random_agent.py --task Isaac-Cartpole-v0 --num_envs 32 --cpu

With the ``--cpu`` flag, the simulation will run on the CPU. This is useful for debugging the simulation.
However, the simulation will run much slower than on the GPU.
6,070
reStructuredText
43.313868
124
0.741516
NVIDIA-Omniverse/orbit/docs/source/tutorials/03_envs/index.rst
Designing an Environment
========================

The following tutorials introduce the concept of environments: :class:`~omni.isaac.orbit.envs.BaseEnv` and
its derivative :class:`~omni.isaac.orbit.envs.RLTaskEnv`. These environments bring together different aspects
of the framework to create a simulation environment for agent interaction.

.. toctree::
   :maxdepth: 1
   :titlesonly:

   create_base_env
   create_rl_env
   register_rl_env_gym
   run_rl_training
477
reStructuredText
28.874998
102
0.721174
NVIDIA-Omniverse/orbit/docs/source/tutorials/05_controllers/run_diff_ik.rst
Using a task-space controller
=============================

.. currentmodule:: omni.isaac.orbit

In the previous tutorials, we have used joint-space controllers to control the robot. However, in many cases,
it is more intuitive to control the robot using a task-space controller. For example, if we want to
teleoperate the robot, it is easier to specify the desired end-effector pose rather than the desired joint
positions.

In this tutorial, we will learn how to use a task-space controller to control the robot. We will use the
:class:`controllers.DifferentialIKController` class to track a desired end-effector pose command.

The Code
~~~~~~~~

The tutorial corresponds to the ``run_diff_ik.py`` script in the
``orbit/source/standalone/tutorials/05_controllers`` directory.

.. dropdown:: Code for run_diff_ik.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
      :language: python
      :emphasize-lines: 100-102, 123-138, 157-159, 163-173
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

While using any task-space controller, it is important to ensure that the provided quantities are in the
correct frames. When parallelizing environment instances, they all exist in the same unique simulation world
frame. However, typically, we want each environment itself to have its own local frame. This is accessible
through the :attr:`scene.InteractiveScene.env_origins` attribute.

In our APIs, we use the following notation for frames:

- The simulation world frame (denoted as ``w``), which is the frame of the entire simulation.
- The local environment frame (denoted as ``e``), which is the frame of the local environment.
- The robot's base frame (denoted as ``b``), which is the frame of the robot's base link.

Since the asset instances are not "aware" of the local environment frame, they return their states in the
simulation world frame. Thus, we need to convert the obtained quantities to the local environment frame. This
is done by subtracting the local environment origin from the obtained quantities.

Creating an IK controller
-------------------------

The :class:`~controllers.DifferentialIKController` class computes the desired joint positions for a robot to
reach a desired end-effector pose. The included implementation performs the computation in a batched format
and uses PyTorch operations. It supports different types of inverse kinematics solvers, including the damped
least-squares method and the pseudo-inverse method. These solvers can be specified using the
:attr:`~controllers.DifferentialIKControllerCfg.ik_method` argument. Additionally, the controller can handle
commands as both relative and absolute poses.

In this tutorial, we will use the damped least-squares method to compute the desired joint positions.
Additionally, since we want to track desired end-effector poses, we will use the absolute pose command mode.

.. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
   :language: python
   :start-at: # Create controller
   :end-at: diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)
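In condensed form, the controller creation referenced above amounts to the following sketch (the
configuration values are those used in this tutorial; ``scene`` and ``sim`` are assumed to be the interactive
scene and simulation context objects from the script):

.. code-block:: python

   from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg

   # damped least-squares IK that tracks absolute end-effector pose commands
   diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
   diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)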
Obtaining the robot's joint and body indices
--------------------------------------------

The IK controller implementation is a computation-only class. Thus, it expects the user to provide the
necessary information about the robot. This includes the robot's joint positions, current end-effector pose,
and the Jacobian matrix.

While the attribute :attr:`assets.ArticulationData.joint_pos` provides the joint positions, we only want the
joint positions of the robot's arm, and not the gripper. Similarly, while the attribute
:attr:`assets.ArticulationData.body_state_w` provides the state of all the robot's bodies, we only want the
state of the robot's end-effector. Thus, we need to index into these arrays to obtain the desired quantities.

For this, the articulation class provides the methods :meth:`~assets.Articulation.find_joints` and
:meth:`~assets.Articulation.find_bodies`. These methods take in the names of the joints and bodies and return
their corresponding indices.

While you may directly use these methods to obtain the indices, we recommend using the
:class:`~managers.SceneEntityCfg` class to resolve the indices. This class is used in various places in the
APIs to extract certain information from a scene entity. Internally, it calls the above methods to obtain the
indices. However, it also performs some additional checks to ensure that the provided names are valid. Thus,
it is a safer option to use this class.

.. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
   :language: python
   :start-at: # Specify robot-specific parameters
   :end-before: # Define simulation stepping

Computing robot command
-----------------------

The IK controller separates the operation of setting the desired command and computing the desired joint
positions. This is done to allow the user to run the IK controller at a different frequency than the robot's
control frequency.

The :meth:`~controllers.DifferentialIKController.set_command` method takes in the desired end-effector pose
as a single batched array. The pose is specified in the robot's base frame.

.. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
   :language: python
   :start-at: # reset controller
   :end-at: diff_ik_controller.set_command(ik_commands)

We can then compute the desired joint positions using the
:meth:`~controllers.DifferentialIKController.compute` method. The method takes in the current end-effector
pose (in base frame), Jacobian, and current joint positions. We read the Jacobian matrix from the robot's
data, which uses its value computed from the physics engine.

.. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
   :language: python
   :start-at: # obtain quantities from simulation
   :end-at: joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)

The computed joint position targets can then be applied to the robot, as done in the previous tutorials.

.. literalinclude:: ../../../../source/standalone/tutorials/05_controllers/run_diff_ik.py
   :language: python
   :start-at: # apply actions
   :end-at: scene.write_data_to_sim()

The Code Execution
~~~~~~~~~~~~~~~~~~

Now that we have gone through the code, let's run the script and see the result:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/05_controllers/run_diff_ik.py --robot franka_panda --num_envs 128

The script will start a simulation with 128 robots. The robots will be controlled using the IK controller.
The current and desired end-effector poses should be displayed using frame markers. When the robot reaches
the desired pose, the command should cycle through to the next pose specified in the script.

To stop the simulation, you can either close the window, or press the ``STOP`` button in the UI, or press
``Ctrl+C`` in the terminal.
7,092
reStructuredText
44.76129
114
0.758319
NVIDIA-Omniverse/orbit/docs/source/tutorials/05_controllers/index.rst
Using Motion Generators
=======================

While the robots in the simulation environment can be controlled at the joint-level, the following tutorials
show you how to use motion generators to control the robots at the task-level.

.. toctree::
   :maxdepth: 1
   :titlesonly:

   run_diff_ik
302
reStructuredText
24.249998
98
0.695364
NVIDIA-Omniverse/orbit/docs/source/tutorials/04_sensors/add_sensors_on_robot.rst
.. _tutorial-add-sensors-on-robot:

Adding sensors on a robot
=========================

.. currentmodule:: omni.isaac.orbit

While the asset classes allow us to create and simulate the physical embodiment of the robot, sensors help in
obtaining information about the environment. They typically update at a lower frequency than the simulation
and are useful for obtaining different proprioceptive and exteroceptive information. For example, a camera
sensor can be used to obtain the visual information of the environment, and a contact sensor can be used to
obtain the contact information of the robot with the environment.

In this tutorial, we will see how to add different sensors to a robot. We will use the ANYmal-C robot for
this tutorial. The ANYmal-C robot is a quadrupedal robot with 12 degrees of freedom. It has 4 legs, each with
3 degrees of freedom. The robot has the following sensors:

- A camera sensor on the head of the robot which provides RGB-D images
- A height scanner sensor that provides terrain height information
- Contact sensors on the feet of the robot that provide contact information

We continue this tutorial from the previous tutorial on :ref:`tutorial-interactive-scene`, where we learned
about the :class:`scene.InteractiveScene` class.

The Code
~~~~~~~~

The tutorial corresponds to the ``add_sensors_on_robot.py`` script in the
``orbit/source/standalone/tutorials/04_sensors`` directory.

.. dropdown:: Code for add_sensors_on_robot.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
      :language: python
      :emphasize-lines: 74-97, 145-155, 169-170
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

Similar to the previous tutorials, where we added assets to the scene, the sensors are also added to the
scene using the scene configuration. All sensors inherit from the :class:`sensors.SensorBase` class and are
configured through their respective config classes. Each sensor instance can define its own update period,
which is the frequency at which the sensor is updated. The update period is specified in seconds through the
:attr:`sensors.SensorBaseCfg.update_period` attribute.

Depending on the specified path and the sensor type, the sensors are attached to the prims in the scene. They
may have an associated prim that is created in the scene or they may be attached to an existing prim. For
instance, the camera sensor has a corresponding prim that is created in the scene, whereas for the contact
sensor, activating the contact reporting is a property on a rigid body prim.

In the following, we introduce the different sensors we use in this tutorial and how they are configured. For
a more detailed description of them, please check the :mod:`sensors` module.

Camera sensor
-------------

A camera is defined using the :class:`sensors.CameraCfg`. It is based on the USD Camera sensor and the
different data types are captured using the Omniverse Replicator API. Since it has a corresponding prim in
the scene, the prim is created in the scene at the specified prim path.

The configuration of the camera sensor includes the following parameters:

* :attr:`~sensors.CameraCfg.spawn`: The type of USD camera to create. This can be either
  :class:`~sim.spawners.sensors.PinholeCameraCfg` or :class:`~sim.spawners.sensors.FisheyeCameraCfg`.
* :attr:`~sensors.CameraCfg.offset`: The offset of the camera sensor from the parent prim.
* :attr:`~sensors.CameraCfg.data_types`: The data types to capture. This can be ``rgb``,
  ``distance_to_image_plane``, ``normals``, or other types supported by the USD Camera sensor.
To attach an RGB-D camera sensor to the head of the robot, we specify an offset relative to the base frame of
the robot. The offset is specified as a translation and rotation relative to the base frame, and the
:attr:`~sensors.CameraCfg.OffsetCfg.convention` in which the offset is specified.

In the following, we show the configuration of the camera sensor used in this tutorial. We set the update
period to 0.1s, which means that the camera sensor is updated at 10Hz. The prim path expression is set to
``{ENV_REGEX_NS}/Robot/base/front_cam`` where the ``{ENV_REGEX_NS}`` is the environment namespace,
``"Robot"`` is the name of the robot, ``"base"`` is the name of the prim to which the camera is attached, and
``"front_cam"`` is the name of the prim associated with the camera sensor.

.. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
   :language: python
   :start-at: camera = CameraCfg(
   :end-before: height_scanner = RayCasterCfg(

Height scanner
--------------

The height-scanner is implemented as a virtual sensor using the NVIDIA Warp ray-casting kernels. Through the
:class:`sensors.RayCasterCfg`, we can specify the pattern of rays to cast and the meshes against which to
cast the rays. Since they are virtual sensors, there is no corresponding prim created in the scene for them.
Instead, they are attached to a prim in the scene, which is used to specify the location of the sensor.

For this tutorial, the ray-cast based height scanner is attached to the base frame of the robot. The pattern
of rays is specified using the :attr:`~sensors.RayCasterCfg.pattern` attribute. For a uniform grid pattern,
we specify the pattern using :class:`~sensors.patterns.GridPatternCfg`. Since we only care about the height
information, we do not need to consider the roll and pitch of the robot. Hence, we set the
:attr:`~sensors.RayCasterCfg.attach_yaw_only` to true.

For the height-scanner, you can visualize the points where the rays hit the mesh. This is done by setting the
:attr:`~sensors.SensorBaseCfg.debug_vis` attribute to true.

The entire configuration of the height-scanner is as follows:

.. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
   :language: python
   :start-at: height_scanner = RayCasterCfg(
   :end-before: contact_forces = ContactSensorCfg(

Contact sensor
--------------

Contact sensors wrap around the PhysX contact reporting API to obtain the contact information of the robot
with the environment. Since it relies on PhysX, the contact sensor expects the contact reporting API to be
enabled on the rigid bodies of the robot. This can be done by setting the
:attr:`~sim.spawners.RigidObjectSpawnerCfg.activate_contact_sensors` to true in the asset configuration.

Through the :class:`sensors.ContactSensorCfg`, it is possible to specify the prims for which we want to
obtain the contact information. Additional flags can be set to obtain more information about the contact,
such as the contact air time, contact forces between filtered prims, etc.

In this tutorial, we attach the contact sensor to the feet of the robot. The feet of the robot are named
``"LF_FOOT"``, ``"RF_FOOT"``, ``"LH_FOOT"``, and ``"RH_FOOT"``. We pass a regex expression ``".*_FOOT"`` to
simplify the prim path specification. This regex expression matches all prims that end with ``"_FOOT"``.
We set the update period to 0 to update the sensor at the same frequency as the simulation. Additionally, for
contact sensors, we can specify the history length of the contact information to store. For this tutorial, we
set the history length to 6, which means that the contact information for the last 6 simulation steps is
stored.

The entire configuration of the contact sensor is as follows:

.. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
   :language: python
   :start-at: contact_forces = ContactSensorCfg(
   :lines: 1-3

Running the simulation loop
---------------------------

Similar to when using assets, the buffers and physics handles for the sensors are initialized only when the
simulation is played, i.e., it is important to call ``sim.reset()`` after creating the scene.

.. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
   :language: python
   :start-at: # Play the simulator
   :end-at: sim.reset()

Besides that, the simulation loop is similar to the previous tutorials. The sensors are updated as part of
the scene update and they internally handle updating their buffers based on their update periods.

The data from the sensors can be accessed through their ``data`` attribute. As an example, we show how to
access the data for the different sensors created in this tutorial:

.. literalinclude:: ../../../../source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
   :language: python
   :start-at: # print information from the sensors
   :end-at: print("Received max contact force of: ", torch.max(scene["contact_forces"].data.net_forces_w).item())

The Code Execution
~~~~~~~~~~~~~~~~~~

Now that we have gone through the code, let's run the script and see the result:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py --num_envs 2

This command should open a stage with a ground plane, lights, and two quadrupedal robots. Around the robots,
you should see red spheres that indicate the points where the rays hit the mesh. Additionally, you can switch
the viewport to the camera view to see the RGB image captured by the camera sensor. Please check
`here <https://youtu.be/htPbcKkNMPs?feature=shared>`_ for more information on how to switch the viewport to
the camera view.

To stop the simulation, you can either close the window, or press ``Ctrl+C`` in the terminal.

While in this tutorial, we went over creating and using different sensors, there are many more sensors
available in the :mod:`sensors` module. We include minimal examples of using these sensors in the
``source/standalone/tutorials/04_sensors`` directory. For completeness, these scripts can be run using the
following commands:

.. code-block:: bash

   # Frame Transformer
   ./orbit.sh -p source/standalone/tutorials/04_sensors/run_frame_transformer.py

   # Ray Caster
   ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster.py

   # Ray Caster Camera
   ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster_camera.py

   # USD Camera
   ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py
10,241
reStructuredText
48.960975
113
0.759301
NVIDIA-Omniverse/orbit/docs/source/tutorials/04_sensors/index.rst
Integrating Sensors
===================

The following tutorials show you how to integrate sensors into the simulation environment. They introduce
the :class:`~omni.isaac.orbit.sensors.SensorBase` class and its derivatives such as
:class:`~omni.isaac.orbit.sensors.Camera` and :class:`~omni.isaac.orbit.sensors.RayCaster`.

.. toctree::
   :maxdepth: 1
   :titlesonly:

   add_sensors_on_robot
406
reStructuredText
30.30769
99
0.729064
NVIDIA-Omniverse/orbit/docs/source/tutorials/00_sim/spawn_prims.rst
.. _tutorial-spawn-prims:

Spawning prims into the scene
=============================

.. currentmodule:: omni.isaac.orbit

This tutorial explores how to spawn various objects (or prims) into the scene in Orbit from Python. It builds
upon the previous tutorial on running the simulator from a standalone script and demonstrates how to spawn a
ground plane, lights, primitive shapes, and meshes from USD files.

The Code
~~~~~~~~

The tutorial corresponds to the ``spawn_prims.py`` script in the ``orbit/source/standalone/tutorials/00_sim``
directory. Let's take a look at the Python script:

.. dropdown:: Code for spawn_prims.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py
      :language: python
      :emphasize-lines: 40-79, 91-92
      :linenos:

The Code Explained
~~~~~~~~~~~~~~~~~~

Scene designing in Omniverse is built around a software system and file format called USD (Universal Scene
Description). It allows describing 3D scenes in a hierarchical manner, similar to a file system. Since USD is
a comprehensive framework, we recommend reading the `USD documentation`_ to learn more about it.

For completeness, we introduce the must-know concepts of USD in this tutorial.

* **Primitives (Prims)**: These are the basic building blocks of a USD scene. They can be thought of as nodes
  in a scene graph. Each node can be a mesh, a light, a camera, or a transform. It can also be a group of
  other prims under it.
* **Attributes**: These are the properties of a prim. They can be thought of as key-value pairs. For example,
  a prim can have an attribute called ``color`` with a value of ``red``.
* **Relationships**: These are the connections between prims. They can be thought of as pointers to other
  prims. For example, a mesh prim can have a relationship to a material prim for shading.

A collection of these prims, with their attributes and relationships, is called a **USD stage**. It can be
thought of as a container for all prims in a scene. When we say we are designing a scene, we are actually
designing a USD stage.

While working with direct USD APIs provides a lot of flexibility, it can be cumbersome to learn and use. To
make it easier to design scenes, Orbit builds on top of the USD APIs to provide a configuration-driven
interface to spawn prims into a scene. These are included in the :mod:`sim.spawners` module.

When spawning prims into the scene, each prim requires a configuration class instance that defines the prim's
attributes and relationships (through material and shading information). The configuration class is then
passed to its respective function where the prim name and transformation are specified. The function then
spawns the prim into the scene.

At a high-level, this is how it works:

.. code-block:: python

   # Create a configuration class instance
   cfg = MyPrimCfg()
   prim_path = "/path/to/prim"

   # Spawn the prim into the scene using the corresponding spawner function
   spawn_my_prim(prim_path, cfg, translation=[0, 0, 0], orientation=[1, 0, 0, 0], scale=[1, 1, 1])
   # OR
   # Use the spawner function directly from the configuration class
   cfg.func(prim_path, cfg, translation=[0, 0, 0], orientation=[1, 0, 0, 0], scale=[1, 1, 1])

In this tutorial, we demonstrate the spawning of various prims into the scene. For more information on the
available spawners, please refer to the :mod:`sim.spawners` module in Orbit.

.. attention::

   All the scene designing must happen before the simulation starts.
Once the simulation starts, we recommend keeping the scene frozen and only altering the properties of the prim. This is particularly important for GPU simulation as adding new prims during simulation may alter the physics simulation buffers on GPU and lead to unexpected behaviors. Spawning a ground plane ----------------------- The :class:`~sim.spawners.from_files.GroundPlaneCfg` configures a grid-like ground plane with modifiable properties such as its appearance and size. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # Ground-plane :end-at: cfg_ground.func("/World/defaultGroundPlane", cfg_ground) Spawning lights --------------- It is possible to spawn `different light prims`_ into the stage. These include distant lights, sphere lights, disk lights, and cylinder lights. In this tutorial, we spawn a distant light which is a light that is infinitely far away from the scene and shines in a single direction. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # spawn distant light :end-at: cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10)) Spawning primitive shapes ------------------------- Before spawning primitive shapes, we introduce the concept of a transform prim or Xform. A transform prim is a prim that contains only transformation properties. It is used to group other prims under it and to transform them as a group. Here we make an Xform prim to group all the primitive shapes under it. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # create a new xform prim for all objects to be spawned under :end-at: prim_utils.create_prim("/World/Objects", "Xform") Next, we spawn a cone using the :class:`~sim.spawners.shapes.ConeCfg` class. It is possible to specify the radius, height, physics properties, and material properties of the cone. By default, the physics and material properties are disabled. The first two cones we spawn ``Cone1`` and ``Cone2`` are visual elements and do not have physics enabled. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # spawn a red cone :end-at: cfg_cone.func("/World/Objects/Cone2", cfg_cone, translation=(-1.0, -1.0, 1.0)) For the third cone ``ConeRigid``, we add rigid body physics to it by setting the attributes for that in the configuration class. Through these attributes, we can specify the mass, friction, and restitution of the cone. If unspecified, they default to the default values set by USD Physics. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # spawn a green cone with colliders and rigid body :end-before: # spawn a usd file of a table into the scene Spawning from another file -------------------------- Lastly, it is possible to spawn prims from other file formats such as other USD, URDF, or OBJ files. In this tutorial, we spawn a USD file of a table into the scene. The table is a mesh prim and has a material prim associated with it. All of this information is stored in its USD file. .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/spawn_prims.py :language: python :start-at: # spawn a usd file of a table into the scene :end-at: cfg.func("/World/Objects/Table", cfg, translation=(0.0, 0.0, 1.05)) The table above is added as a reference to the scene. 
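To make the pattern concrete, the following condenses the steps above into one minimal, self-contained sketch. It is a simplified variant of the tutorial script, not a replacement for it; the dimensions, colors, and light settings shown here are illustrative values.

.. code-block:: python

   """Minimal spawning sketch, condensed from the tutorial above."""

   from omni.isaac.orbit.app import AppLauncher

   # launch the simulator first -- all spawning happens after the app starts
   app_launcher = AppLauncher(headless=True)
   simulation_app = app_launcher.app

   import omni.isaac.core.utils.prims as prim_utils
   import omni.isaac.orbit.sim as sim_utils

   # ground plane
   cfg_ground = sim_utils.GroundPlaneCfg()
   cfg_ground.func("/World/defaultGroundPlane", cfg_ground)

   # dome light (intensity and color are illustrative values)
   cfg_light = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8))
   cfg_light.func("/World/Light", cfg_light)

   # a transform prim to group the shapes, with a green cone under it
   prim_utils.create_prim("/World/Objects", "Xform")
   cfg_cone = sim_utils.ConeCfg(
       radius=0.1,
       height=0.2,
       visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
   )
   cfg_cone.func("/World/Objects/Cone", cfg_cone, translation=(0.0, 0.0, 1.0))

   # close the app once done
   simulation_app.close()

Note that both spawning styles shown earlier are interchangeable: ``cfg.func(...)`` simply dispatches to the spawner function stored on the configuration class.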
Executing the Script
~~~~~~~~~~~~~~~~~~~~

Similar to the tutorial before, to run the script, execute the following command:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/00_sim/spawn_prims.py

Once the simulation starts, you should see a window with a ground plane, a light, some cones, and a table. The green cone, which has rigid body physics enabled, should fall and collide with the table and the ground plane. The other cones are visual elements and should not move. To stop the simulation, you can close the window, or press ``Ctrl+C`` in the terminal.

This tutorial provided a foundation for spawning various prims into the scene in Orbit. Although simple, it demonstrates the basic concepts of scene designing in Orbit and how to use the spawners. In the coming tutorials, we will now look at how to interact with the scene and the simulation.

.. _`USD documentation`: https://graphics.pixar.com/usd/docs/index.html
.. _`different light prims`: https://youtu.be/c7qyI8pZvF4?feature=shared
NVIDIA-Omniverse/orbit/docs/source/tutorials/00_sim/launch_app.rst
Deep-dive into AppLauncher
==========================

.. currentmodule:: omni.isaac.orbit

In this tutorial, we dive into the :class:`app.AppLauncher` class to configure the simulator using CLI arguments and environment variables (envars). In particular, we demonstrate how to use :class:`~app.AppLauncher` to enable livestreaming and configure the :class:`omni.isaac.kit.SimulationApp` instance it wraps, while also allowing user-provided options.

The :class:`~app.AppLauncher` is a wrapper for :class:`~omni.isaac.kit.SimulationApp` that simplifies its configuration. The :class:`~omni.isaac.kit.SimulationApp` has many extensions that must be loaded to enable different capabilities, and some of these extensions are order- and inter-dependent. Additionally, there are startup options such as ``headless`` that must be set at instantiation time and that have an implied relationship with some extensions, e.g. the livestreaming extensions. The :class:`~app.AppLauncher` presents an interface that can handle these extensions and startup options in a portable manner across a variety of use cases. To achieve this, we offer CLI and envar flags which can be merged with user-defined CLI args, while passing forward arguments intended for :class:`~omni.isaac.kit.SimulationApp`.


The Code
--------

The tutorial corresponds to the ``launch_app.py`` script in the ``orbit/source/standalone/tutorials/00_sim`` directory.

.. dropdown:: Code for launch_app.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/launch_app.py
      :language: python
      :emphasize-lines: 18-40
      :linenos:


The Code Explained
------------------

Adding arguments to the argparser
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

:class:`~app.AppLauncher` is designed to be compatible with custom CLI args that users need for their own scripts, while still providing a portable CLI interface.

In this tutorial, a standard :class:`argparse.ArgumentParser` is instantiated and given the script-specific ``--size`` argument, as well as the arguments ``--height`` and ``--width``. The latter are ingested by :class:`~omni.isaac.kit.SimulationApp`.

The argument ``--size`` is not used by :class:`~app.AppLauncher`, but will merge seamlessly with the :class:`~app.AppLauncher` interface. In-script arguments can be merged with the :class:`~app.AppLauncher` interface via the :meth:`~app.AppLauncher.add_app_launcher_args` method, which will return a modified :class:`~argparse.ArgumentParser` with the :class:`~app.AppLauncher` arguments appended. This can then be processed into an :class:`argparse.Namespace` using the standard :meth:`argparse.ArgumentParser.parse_args` method and passed directly to :class:`~app.AppLauncher` for instantiation.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/launch_app.py
   :language: python
   :start-at: import argparse
   :end-at: simulation_app = app_launcher.app

The above illustrates only one of several ways of passing arguments to :class:`~app.AppLauncher`. Please consult its documentation page to see further options.

Understanding the output of --help
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While executing the script, we can pass the ``--help`` argument and see the combined outputs of the custom arguments and those from :class:`~app.AppLauncher`.

.. code-block:: console

   ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py --help

   [INFO] Using python from: /isaac-sim/python.sh
   [INFO][AppLauncher]: The argument 'width' will be used to configure the SimulationApp.
   [INFO][AppLauncher]: The argument 'height' will be used to configure the SimulationApp.
   usage: launch_app.py [-h] [--size SIZE] [--width WIDTH] [--height HEIGHT] [--headless]
                        [--livestream {0,1,2,3}] [--offscreen_render] [--verbose] [--experience EXPERIENCE]

   Tutorial on running IsaacSim via the AppLauncher.

   options:
     -h, --help            show this help message and exit
     --size SIZE           Side-length of cuboid
     --width WIDTH         Width of the viewport and generated images. Defaults to 1280
     --height HEIGHT       Height of the viewport and generated images. Defaults to 720

   app_launcher arguments:
     --headless            Force display off at all times.
     --livestream {0,1,2,3}
                           Force enable livestreaming. Mapping corresponds to that for the "LIVESTREAM" environment variable.
     --offscreen_render    Enable offscreen rendering when running without a GUI.
     --verbose             Enable verbose terminal logging from the SimulationApp.
     --experience EXPERIENCE
                           The experience file to load when launching the SimulationApp.

                           * If an empty string is provided, the experience file is determined based on the headless flag.
                           * If a relative path is provided, it is resolved relative to the `apps` folder in Isaac Sim and
                             Orbit (in that order).

This readout details the ``--size``, ``--height``, and ``--width`` arguments defined in the script directly, as well as the :class:`~app.AppLauncher` arguments.

The ``[INFO]`` messages preceding the help output also read out which of these arguments are going to be interpreted as arguments to the :class:`~omni.isaac.kit.SimulationApp` instance which the :class:`~app.AppLauncher` class wraps. In this case, it is ``--height`` and ``--width``. These are classified as such because they match the name and type of an argument which can be processed by :class:`~omni.isaac.kit.SimulationApp`. Please refer to the `specification`_ for such arguments for more examples.

Using environment variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^

As noted in the help message, the :class:`~app.AppLauncher` arguments (``--livestream``, ``--headless``) have corresponding environment variables (envars) as well. These are detailed in the :mod:`omni.isaac.orbit.app` documentation. Providing any of these arguments through the CLI is equivalent to running the script in a shell environment where the corresponding envar is set.

The support for :class:`~app.AppLauncher` envars is simply a convenience to provide session-persistent configurations, and they can be set in the user's ``${HOME}/.bashrc`` for persistent settings between sessions. In the case where these arguments are provided from the CLI, they will override their corresponding envars, as we will demonstrate later in this tutorial.

These arguments can be used with any script that starts the simulation using :class:`~app.AppLauncher`, with one exception: ``--offscreen_render``. This setting sets the rendering pipeline to use the offscreen renderer. However, this setting is only compatible with the :class:`omni.isaac.orbit.sim.SimulationContext`. It will not work with Isaac Sim's :class:`omni.isaac.core.simulation_context.SimulationContext` class. For more information on this flag, please see the :class:`~app.AppLauncher` API documentation.
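Besides CLI arguments and envars, the same settings can also be supplied programmatically when a script has no CLI of its own. The following is a minimal sketch of that pattern; only the ``headless`` keyword is shown here, and other keyword settings follow the API documentation.

.. code-block:: python

   from omni.isaac.orbit.app import AppLauncher

   # configure the app directly with keyword arguments instead of parsing CLI args
   app_launcher = AppLauncher(headless=True)
   simulation_app = app_launcher.app

   # ... set up and run the simulation ...

   # close the app once done
   simulation_app.close()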
The Code Execution
------------------

We will now run the example script:

.. code-block:: console

   LIVESTREAM=1 ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py --size 0.5

This will spawn a 0.5m\ :sup:`3` volume cuboid in the simulation. No GUI will appear, equivalent to if we had passed the ``--headless`` flag, because headlessness is implied by our ``LIVESTREAM`` envar. If a visualization is desired, we could get one via Isaac's `Native Livestreaming`_. Streaming is currently the only supported method of visualization from within the container. The process can be killed by pressing ``Ctrl+C`` in the launching terminal.

Now, let's look at how :class:`~app.AppLauncher` handles conflicting commands:

.. code-block:: console

   export LIVESTREAM=0
   ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py --size 0.5 --livestream 1

This will cause the same behavior as in the previous run, because although we have set ``LIVESTREAM=0`` in our envars, CLI args such as ``--livestream`` take precedence in determining behavior. The process can be killed by pressing ``Ctrl+C`` in the launching terminal.

Finally, we will examine passing arguments to :class:`~omni.isaac.kit.SimulationApp` through :class:`~app.AppLauncher`:

.. code-block:: console

   export LIVESTREAM=1
   ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py --size 0.5 --width 1920 --height 1080

This will cause the same behavior as before, but now the viewport will be rendered at 1920x1080p resolution. This can be useful when we want to gather high-resolution video, or we can specify a lower resolution if we want our simulation to be more performant. The process can be killed by pressing ``Ctrl+C`` in the launching terminal.

.. _specification: https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.kit/docs/index.html#omni.isaac.kit.SimulationApp.DEFAULT_LAUNCHER_CONFIG
.. _Native Livestreaming: https://docs.omniverse.nvidia.com/isaacsim/latest/installation/manual_livestream_clients.html#omniverse-streaming-client
NVIDIA-Omniverse/orbit/docs/source/tutorials/00_sim/create_empty.rst
Creating an empty scene
=======================

.. currentmodule:: omni.isaac.orbit

This tutorial shows how to launch and control the Isaac Sim simulator from a standalone Python script. It sets up an empty scene in Orbit and introduces the two main classes used in the framework, :class:`app.AppLauncher` and :class:`sim.SimulationContext`.

Please review `Isaac Sim Interface`_ and `Isaac Sim Workflows`_ prior to beginning this tutorial to get an initial understanding of working with the simulator.


The Code
~~~~~~~~

The tutorial corresponds to the ``create_empty.py`` script in the ``orbit/source/standalone/tutorials/00_sim`` directory.

.. dropdown:: Code for create_empty.py
   :icon: code

   .. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
      :language: python
      :emphasize-lines: 18-30,34,40-44,46-47,51-54,60-61
      :linenos:


The Code Explained
~~~~~~~~~~~~~~~~~~

Launching the simulator
-----------------------

The first step when working with standalone Python scripts is to launch the simulation application. This is necessary to do at the start since various dependency modules of Isaac Sim are only available after the simulation app is running.

This can be done by importing the :class:`app.AppLauncher` class. This utility class wraps around the :class:`omni.isaac.kit.SimulationApp` class to launch the simulator. It provides mechanisms to configure the simulator using command-line arguments and environment variables.

For this tutorial, we mainly look at adding the command-line options to a user-defined :class:`argparse.ArgumentParser`. This is done by passing the parser instance to the :meth:`app.AppLauncher.add_app_launcher_args` method, which appends different parameters to it. These include launching the app headless, configuring different Livestream options, and enabling off-screen rendering.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
   :language: python
   :start-at: import argparse
   :end-at: simulation_app = app_launcher.app

Importing python modules
------------------------

Once the simulation app is running, it is possible to import different Python modules from Isaac Sim and other libraries. Here we import the following module:

* :mod:`omni.isaac.orbit.sim`: A sub-package in Orbit for all the core simulator-related operations.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
   :language: python
   :start-at: from omni.isaac.orbit.sim import SimulationCfg, SimulationContext
   :end-at: from omni.isaac.orbit.sim import SimulationCfg, SimulationContext

Configuring the simulation context
----------------------------------

When launching the simulator from a standalone script, the user has complete control over playing, pausing, and stepping the simulator. All these operations are handled through the **simulation context**. It takes care of various timeline events and also configures the `physics scene`_ for simulation.

In Orbit, the :class:`sim.SimulationContext` class inherits from Isaac Sim's :class:`omni.isaac.core.simulation_context.SimulationContext` to allow configuring the simulation through Python's ``dataclass`` object and to handle certain intricacies of the simulation stepping.

For this tutorial, we set the physics and rendering time step to 0.01 seconds. This is done by passing these quantities to the :class:`sim.SimulationCfg`, which is then used to create an instance of the simulation context.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
   :language: python
   :start-at: # Initialize the simulation context
   :end-at: sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

Following the creation of the simulation context, we have only configured the physics acting on the simulated scene. This includes the device to use for simulation, the gravity vector, and other advanced solver parameters. There are now two main steps remaining to run the simulation:

1. Designing the simulation scene: Adding sensors, robots and other simulated objects
2. Running the simulation loop: Stepping the simulator, and setting and getting data from the simulator

In this tutorial, we look at Step 2 first for an empty scene, to focus on the simulation control. In the following tutorials, we will look into Step 1 and working with simulation handles for interacting with the simulator.

Running the simulation
----------------------

The first thing, after setting up the simulation scene, is to call the :meth:`sim.SimulationContext.reset` method. This method plays the timeline and initializes the physics handles in the simulator. It must always be called the first time before stepping the simulator. Otherwise, the simulation handles are not initialized properly.

.. note::

   :meth:`sim.SimulationContext.reset` is different from the :meth:`sim.SimulationContext.play` method, as the latter only plays the timeline and does not initialize the physics handles.

After playing the simulation timeline, we set up a simple simulation loop where the simulator is stepped repeatedly while the simulation app is running. The method :meth:`sim.SimulationContext.step` takes in the argument :attr:`render`, which dictates whether the step includes updating the rendering-related events or not. By default, this flag is set to True.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
   :language: python
   :start-at: # Play the simulator
   :end-at: sim.step()

Exiting the simulation
----------------------

Lastly, the simulation application is stopped and its window is closed by calling the :meth:`omni.isaac.kit.SimulationApp.close` method.

.. literalinclude:: ../../../../source/standalone/tutorials/00_sim/create_empty.py
   :language: python
   :start-at: # close sim app
   :end-at: simulation_app.close()
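Putting the above sections together, the skeleton of a standalone Orbit script looks as follows. This is a minimal sketch that assumes default simulation settings apart from the time step (set here through the ``dt`` field of :class:`sim.SimulationCfg`); the complete version is in the tutorial script above.

.. code-block:: python

   from omni.isaac.orbit.app import AppLauncher

   # launch the simulator (headless here; drop the flag to get a GUI)
   app_launcher = AppLauncher(headless=True)
   simulation_app = app_launcher.app

   from omni.isaac.orbit.sim import SimulationCfg, SimulationContext

   # configure the physics and rendering time-step and create the context
   sim_cfg = SimulationCfg(dt=0.01)
   sim = SimulationContext(sim_cfg)

   # play the timeline and initialize the physics handles
   sim.reset()

   # step the simulation while the app is running
   while simulation_app.is_running():
       sim.step()

   # close the app
   simulation_app.close()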
The Code Execution
~~~~~~~~~~~~~~~~~~

Now that we have gone through the code, let's run the script and see the result:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/00_sim/create_empty.py

The simulation should be playing, and the stage should be rendering. To stop the simulation, you can either close the window, or press ``Ctrl+C`` in the terminal.

Passing ``--help`` to the above script will show the different command-line arguments added earlier by the :class:`app.AppLauncher` class. To run the script headless, you can execute the following:

.. code-block:: bash

   ./orbit.sh -p source/standalone/tutorials/00_sim/create_empty.py --headless

Now that we have a basic understanding of how to run a simulation, let's move on to the following tutorial, where we will learn how to add assets to the stage.

.. _`Isaac Sim Interface`: https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/tutorial_intro_interface.html#isaac-sim-app-tutorial-intro-interface
.. _`Isaac Sim Workflows`: https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/tutorial_intro_workflows.html
.. _carb: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/index.html
.. _`physics scene`: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#physics-scene
NVIDIA-Omniverse/orbit/docs/source/tutorials/00_sim/index.rst
Setting up a Simple Simulation
==============================

These tutorials show you how to launch the simulation with different settings and spawn objects in the simulated scene. They cover the following APIs: :class:`~omni.isaac.orbit.app.AppLauncher`, :class:`~omni.isaac.orbit.sim.SimulationContext`, and :class:`~omni.isaac.orbit.sim.spawners`.

.. toctree::
    :maxdepth: 1
    :titlesonly:

    create_empty
    spawn_prims
    launch_app
NVIDIA-Omniverse/orbit/docs/source/setup/faq.rst
Frequently Asked Questions
==========================

Where does Orbit fit in the Isaac ecosystem?
--------------------------------------------

Over the years, NVIDIA has developed a number of tools for robotics and AI. These tools leverage the power of GPUs to accelerate the simulation both in terms of speed and realism. They show great promise in the field of simulation technology and are being used by many researchers and companies worldwide.

`Isaac Gym`_ :cite:`makoviychuk2021isaac` provides a high-performance GPU-based physics simulation for robot learning. It is built on top of `PhysX`_, which supports GPU-accelerated simulation of rigid bodies and a Python API to directly access physics simulation data. Through an end-to-end GPU pipeline, it is possible to achieve high frame rates compared to CPU-based physics engines. The tool has been used successfully in a number of research projects, including legged locomotion :cite:`rudin2022learning` :cite:`rudin2022advanced`, in-hand manipulation :cite:`handa2022dextreme` :cite:`allshire2022transferring`, and industrial assembly :cite:`narang2022factory`.

Despite the success of Isaac Gym, it is not designed to be a general-purpose simulator for robotics. For example, it does not include interaction between deformable and rigid objects, high-fidelity rendering, or support for ROS. The tool has been primarily designed as a preview release to showcase the capabilities of the underlying physics engine. With the release of `Isaac Sim`_, NVIDIA is building a general-purpose simulator for robotics and has integrated the functionalities of Isaac Gym into Isaac Sim.

`Isaac Sim`_ is a robot simulation toolkit built on top of Omniverse, which is a general-purpose platform that aims to unite complex 3D workflows. Isaac Sim leverages the latest advances in graphics and physics simulation to provide a high-fidelity simulation environment for robotics. It supports ROS/ROS2, simulation of various sensors, and tools for domain randomization and synthetic data creation. Overall, it is a powerful tool for roboticists and is a huge step forward in the field of robotics simulation.

With the release of the above two tools, NVIDIA also released an open-source set of environments called `IsaacGymEnvs`_ and `OmniIsaacGymEnvs`_, which have been built on top of Isaac Gym and Isaac Sim, respectively. These environments have been designed to display the capabilities of the underlying simulators and provide a starting point to understand what is possible with the simulators for robot learning. These environments can be used for benchmarking, but are not designed for developing and testing custom environments and algorithms. This is where Orbit comes in.

Orbit :cite:`mittal2023orbit` is built on top of Isaac Sim to provide a unified and flexible framework for robot learning that exploits the latest simulation technologies. It is designed to be modular and extensible, and aims to simplify common workflows in robotics research (such as RL, learning from demonstrations, and motion planning). While it includes some pre-built environments, sensors, and tasks, its main goal is to provide an open-sourced, unified, and easy-to-use interface for developing and testing custom environments and robot learning algorithms. It not only inherits the capabilities of Isaac Sim, but also adds a number of new features that pertain to robot learning research, for example, including actuator dynamics in the simulation, procedural terrain generation, and support to collect data from human demonstrations.

Where does the name come from?
------------------------------

"Orbit" suggests a sense of movement circling around a central point. For us, this symbolizes bringing together the different components and paradigms centered around robot learning, and making a unified ecosystem for it.

The name further connotes modularity and flexibility. Similar to planets in a solar system, which orbit at different speeds and positions, the framework is designed not to be rigid or inflexible. Rather, it aims to provide users the ability to adjust and move around the different components to suit their needs.

Finally, the name "orbit" also suggests a sense of exploration and discovery. We hope that the framework will provide a platform for researchers to explore and discover new ideas and paradigms in robot learning.

Why should I use Orbit?
-----------------------

Since Isaac Sim remains closed-source, it is difficult for users to contribute to the simulator and build a common framework for research. On its current path, we see users of the simulator simply developing their own frameworks, which results in scattered efforts with a lot of duplicated work. This has happened in the past with other simulators, and we believe that it is not the best way to move forward as a community.

Orbit provides an open-sourced platform for the community to drive progress with consolidated efforts toward designing benchmarks and robot learning systems as a joint initiative. This allows us to reuse existing components and algorithms, and to build on top of each other's work. Doing so not only saves time and effort, but also allows us to focus on the more important aspects of research.

Our hope with Orbit is that it becomes the de-facto platform for robot learning research and an environment *zoo* that leverages Isaac Sim. As the framework matures, we foresee it benefitting hugely from the latest simulation developments (as part of internal developments at NVIDIA and collaborating partners) and research in robotics. We are already working with labs in universities and research institutions to integrate their work into Orbit, and hope that others in the community will join us too in this effort.

If you are interested in contributing to Orbit, please reach out to us at `email <mailto:[email protected]>`_.

.. _PhysX: https://developer.nvidia.com/physx-sdk
.. _Isaac Sim: https://developer.nvidia.com/isaac-sim
.. _Isaac Gym: https://developer.nvidia.com/isaac-gym
.. _IsaacGymEnvs: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
.. _OmniIsaacGymEnvs: https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs
NVIDIA-Omniverse/orbit/docs/source/setup/installation.rst
Installation Guide
==================

.. image:: https://img.shields.io/badge/IsaacSim-2023.1.1-silver.svg
   :target: https://developer.nvidia.com/isaac-sim
   :alt: IsaacSim 2023.1.1

.. image:: https://img.shields.io/badge/python-3.10-blue.svg
   :target: https://www.python.org/downloads/release/python-31013/
   :alt: Python 3.10

.. image:: https://img.shields.io/badge/platform-linux--64-orange.svg
   :target: https://releases.ubuntu.com/20.04/
   :alt: Ubuntu 20.04

Installing Isaac Sim
--------------------

.. caution::

   We have dropped support for Isaac Sim versions 2022.2 and below. We recommend using the latest Isaac Sim 2023.1 releases (``2023.1.0-hotfix.1`` or ``2023.1.1``). For more information, please refer to the `Isaac Sim release notes <https://docs.omniverse.nvidia.com/isaacsim/latest/release_notes.html>`__.

Downloading pre-built binaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Please follow the Isaac Sim `documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html>`__ to install the latest Isaac Sim release. To check the minimum system requirements, refer to the documentation `here <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html>`__.

.. note::

   We have tested Orbit with the Isaac Sim 2023.1.0-hotfix.1 release on Ubuntu 20.04 LTS with NVIDIA driver 525.147.

Configuring the environment variables
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Isaac Sim is shipped with its own Python interpreter which bundles in the extensions released with it. To simplify the setup, we recommend using the same Python interpreter. Alternatively, it is possible to set up a virtual environment following the instructions `here <https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html>`__.

Please locate the `Python executable in Isaac Sim <https://docs.omniverse.nvidia.com/isaacsim/latest/manual_standalone_python.html#isaac-sim-python-environment>`__ by navigating to the Isaac Sim root folder. In the remainder of the documentation, we will refer to its path as ``ISAACSIM_PYTHON_EXE``.

.. note::

   On Linux systems, by default, this should be the executable ``python.sh`` in the directory ``${HOME}/.local/share/ov/pkg/isaac_sim-*``, with ``*`` corresponding to the Isaac Sim version.

To avoid the overhead of finding and locating the Isaac Sim installation directory every time, we recommend exporting the following environment variables to your terminal for the remainder of the installation instructions:

.. code:: bash

   # Isaac Sim root directory
   export ISAACSIM_PATH="${HOME}/.local/share/ov/pkg/isaac_sim-2023.1.0-hotfix.1"
   # Isaac Sim python executable
   export ISAACSIM_PYTHON_EXE="${ISAACSIM_PATH}/python.sh"

For more information on common paths, please check the Isaac Sim `documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_faq.html#common-path-locations>`__.

Running the simulator
~~~~~~~~~~~~~~~~~~~~~

Once Isaac Sim is installed successfully, make sure that the simulator runs on your system. For this, we encourage the user to try some of the introductory tutorials on their `website <https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/index.html>`__.

For completeness, we specify the commands here to check that everything is configured correctly. On a new terminal (**Ctrl+Alt+T**), run the following:

- Check that the simulator runs as expected:

  .. code:: bash

     # note: you can pass the argument "--help" to see all arguments possible.
     ${ISAACSIM_PATH}/isaac-sim.sh

- Check that the simulator runs from a standalone python script:

  .. code:: bash

     # checks that python path is set correctly
     ${ISAACSIM_PYTHON_EXE} -c "print('Isaac Sim configuration is now complete.')"
     # checks that Isaac Sim can be launched from python
     ${ISAACSIM_PYTHON_EXE} ${ISAACSIM_PATH}/standalone_examples/api/omni.isaac.core/add_cubes.py

.. attention::

   If you have been using a previous version of Isaac Sim, you need to run the following command for the *first* time after installation to remove all the old user data and cached variables:

   .. code:: bash

      ${ISAACSIM_PATH}/isaac-sim.sh --reset-user

If the simulator does not run or crashes while following the above instructions, it means that something is incorrectly configured. To debug and troubleshoot, please check the Isaac Sim `documentation <https://docs.omniverse.nvidia.com/dev-guide/latest/linux-troubleshooting.html>`__ and the `forums <https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_sim_forums.html>`__.

Installing Orbit
----------------

Organizing the workspace
~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   We recommend making a `fork <https://github.com/NVIDIA-Omniverse/Orbit/fork>`_ of the ``orbit`` repository to contribute to the project. This is not mandatory to use the framework. If you make a fork, please replace ``NVIDIA-Omniverse`` with your username in the following instructions. If you are not familiar with git, we recommend following the `git tutorial <https://git-scm.com/book/en/v2/Getting-Started-Git-Basics>`__.

- Clone the ``orbit`` repository into your workspace:

  .. code:: bash

     # Option 1: With SSH
     git clone [email protected]:NVIDIA-Omniverse/orbit.git
     # Option 2: With HTTPS
     git clone https://github.com/NVIDIA-Omniverse/orbit.git

- Set up a symbolic link between the installed Isaac Sim root folder and ``_isaac_sim`` in the ``orbit`` directory. This makes it convenient to index the python modules and look for extensions shipped with Isaac Sim.

  .. code:: bash

     # enter the cloned repository
     cd orbit
     # create a symbolic link
     ln -s ${ISAACSIM_PATH} _isaac_sim

We provide a helper executable `orbit.sh <https://github.com/NVIDIA-Omniverse/Orbit/blob/main/orbit.sh>`_ that provides utilities to manage extensions:

.. code:: text

   ./orbit.sh --help

   usage: orbit.sh [-h] [-i] [-e] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-c] -- Utility to manage Orbit.

   optional arguments:
     -h, --help           Display the help content.
     -i, --install        Install the extensions inside Orbit.
     -e, --extra [LIB]    Install learning frameworks (rl_games, rsl_rl, sb3) as extra dependencies. Default is 'all'.
     -f, --format         Run pre-commit to format the code and check lints.
     -p, --python         Run the python executable provided by Isaac Sim or virtual environment (if active).
     -s, --sim            Run the simulator executable (isaac-sim.sh) provided by Isaac Sim.
     -t, --test           Run all python unittest tests.
     -o, --docker         Run the docker container helper script (docker/container.sh).
     -v, --vscode         Generate the VSCode settings file from template.
     -d, --docs           Build the documentation from source using sphinx.
     -c, --conda [NAME]   Create the conda environment for Orbit. Default name is 'orbit'.

Setting up the environment
~~~~~~~~~~~~~~~~~~~~~~~~~~

The executable ``orbit.sh`` automatically fetches the python bundled with Isaac Sim when using the ``./orbit.sh -p`` command (unless inside a virtual environment). This executable behaves like a python executable, and can be used to run any python script or module with the simulator. For more information, please refer to the `documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/manual_standalone_python.html#isaac-sim-python-environment>`__.

Although using a virtual environment is optional, we recommend using ``conda``. To install ``conda``, please follow the instructions `here <https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html>`__. In case you want to use ``conda`` to create a virtual environment, you can use the following command:

.. code:: bash

   # Option 1: Default name for conda environment is 'orbit'
   ./orbit.sh --conda  # or "./orbit.sh -c"
   # Option 2: Custom name for conda environment
   ./orbit.sh --conda my_env  # or "./orbit.sh -c my_env"

If you are using ``conda`` to create a virtual environment, make sure to activate the environment before running any scripts. For example:

.. code:: bash

   conda activate orbit  # or "conda activate my_env"

Once you are in the virtual environment, you do not need to use ``./orbit.sh -p`` to run python scripts. You can use the default python executable in your environment by running ``python`` or ``python3``. However, for the rest of the documentation, we will assume that you are using ``./orbit.sh -p`` to run python scripts. This command is equivalent to running ``python`` or ``python3`` in your virtual environment.

Building extensions
~~~~~~~~~~~~~~~~~~~

To build all the extensions, run the following commands:

- Install dependencies using ``apt`` (on Ubuntu):

  .. code:: bash

     sudo apt install cmake build-essential

- Run the install command that iterates over all the extensions in the ``source/extensions`` directory and installs them using pip (with the ``--editable`` flag):

  .. code:: bash

     ./orbit.sh --install  # or "./orbit.sh -i"

- For installing all other dependencies (such as learning frameworks), execute:

  .. code:: bash

     # Option 1: Install all dependencies
     ./orbit.sh --extra  # or "./orbit.sh -e"
     # Option 2: Install only a subset of dependencies
     # note: valid options are 'rl_games', 'rsl_rl', 'sb3', 'robomimic', 'all'
     ./orbit.sh --extra rsl_rl  # or "./orbit.sh -e rsl_rl"

Verifying the installation
~~~~~~~~~~~~~~~~~~~~~~~~~~

To verify that the installation was successful, run the following command from the top of the repository:

.. code:: bash

   # Option 1: Using the orbit.sh executable
   # note: this works for both the bundled python and the virtual environment
   ./orbit.sh -p source/standalone/tutorials/00_sim/create_empty.py

   # Option 2: Using python in your virtual environment
   python source/standalone/tutorials/00_sim/create_empty.py

The above command should launch the simulator and display a window with a black ground plane. You can exit the script by pressing ``Ctrl+C`` on your terminal or by pressing the ``STOP`` button on the simulator window. If you see this, then the installation was successful! |:tada:|
NVIDIA-Omniverse/orbit/docs/source/setup/sample.rst
Running Existing Scripts
========================

Showroom
--------

The main core interface extension in Orbit, ``omni.isaac.orbit``, provides the main modules for actuators, objects, robots and sensors. We provide a list of demo scripts and tutorials. These showcase how to use the provided interfaces in code in a minimal way.

A few quick showroom scripts to run and check out:

- Spawn different quadrupeds and make robots stand using position commands:

  .. code:: bash

     ./orbit.sh -p source/standalone/demos/quadrupeds.py

- Spawn different arms and apply random joint position commands:

  .. code:: bash

     ./orbit.sh -p source/standalone/demos/arms.py

- Spawn different hands and command them to open and close:

  .. code:: bash

     ./orbit.sh -p source/standalone/demos/hands.py

- Spawn procedurally generated terrains with different configurations:

  .. code:: bash

     ./orbit.sh -p source/standalone/demos/procedural_terrain.py

- Spawn multiple markers that are useful for visualizations:

  .. code:: bash

     ./orbit.sh -p source/standalone/demos/markers.py

Workflows
---------

With Orbit, we also provide a suite of benchmark environments included in the ``omni.isaac.orbit_tasks`` extension. We use the OpenAI Gym registry to register these environments. For each environment, we provide a default configuration file that defines the scene, observations, rewards and action spaces.

The list of environments registered with OpenAI Gym can be found by running:

.. code:: bash

   ./orbit.sh -p source/standalone/environments/list_envs.py

Basic agents
~~~~~~~~~~~~

These include basic agents that output zero or random actions. They are useful to ensure that the environments are configured correctly.

- Zero-action agent on the Cart-pole example:

  .. code:: bash

     ./orbit.sh -p source/standalone/environments/zero_agent.py --task Isaac-Cartpole-v0 --num_envs 32

- Random-action agent on the Cart-pole example:

  .. code:: bash

     ./orbit.sh -p source/standalone/environments/random_agent.py --task Isaac-Cartpole-v0 --num_envs 32

State machine
~~~~~~~~~~~~~

We include examples of hand-crafted state machines for the environments. These help in understanding the environment and how to use the provided interfaces. The state machines are written in `warp <https://github.com/NVIDIA/warp>`__, which allows efficient execution for a large number of environments using CUDA kernels.

.. code:: bash

   ./orbit.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 32

Teleoperation
~~~~~~~~~~~~~

We provide interfaces for providing commands in SE(2) and SE(3) space for robot control. In the case of SE(2) teleoperation, the returned command is the linear x-y velocity and yaw rate, while in SE(3), the returned command is a 6-D vector representing the change in pose.

To play inverse kinematics (IK) control with a keyboard device:

.. code:: bash

   ./orbit.sh -p source/standalone/environments/teleoperation/teleop_se3_agent.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --num_envs 1 --device keyboard

The script prints the teleoperation events configured. For keyboard, these are as follows:

.. code:: text

   Keyboard Controller for SE(3): Se3Keyboard
       Reset all commands: L
       Toggle gripper (open/close): K
       Move arm along x-axis: W/S
       Move arm along y-axis: A/D
       Move arm along z-axis: Q/E
       Rotate arm along x-axis: Z/X
       Rotate arm along y-axis: T/G
       Rotate arm along z-axis: C/V

Imitation Learning
~~~~~~~~~~~~~~~~~~

Using the teleoperation devices, it is also possible to collect data for learning from demonstrations (LfD). For this, we support the learning framework `Robomimic <https://robomimic.github.io/>`__ and allow saving data in the `HDF5 <https://robomimic.github.io/docs/tutorials/dataset_contents.html#viewing-hdf5-dataset-structure>`__ format.

1. Collect demonstrations with teleoperation for the environment ``Isaac-Lift-Cube-Franka-IK-Rel-v0``:

   .. code:: bash

      # step a: collect data with keyboard
      ./orbit.sh -p source/standalone/workflows/robomimic/collect_demonstrations.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --num_envs 1 --num_demos 10 --device keyboard
      # step b: inspect the collected dataset
      ./orbit.sh -p source/standalone/workflows/robomimic/tools/inspect_demonstrations.py logs/robomimic/Isaac-Lift-Cube-Franka-IK-Rel-v0/hdf_dataset.hdf5

2. Split the dataset into train and validation set:

   .. code:: bash

      # install python module (for robomimic)
      ./orbit.sh -e robomimic
      # split data
      ./orbit.sh -p source/standalone/workflows/robomimic/tools/split_train_val.py logs/robomimic/Isaac-Lift-Cube-Franka-IK-Rel-v0/hdf_dataset.hdf5 --ratio 0.2

3. Train a BC agent for ``Isaac-Lift-Cube-Franka-IK-Rel-v0`` with `Robomimic <https://robomimic.github.io/>`__:

   .. code:: bash

      ./orbit.sh -p source/standalone/workflows/robomimic/train.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --algo bc --dataset logs/robomimic/Isaac-Lift-Cube-Franka-IK-Rel-v0/hdf_dataset.hdf5

4. Play the learned model to visualize results:

   .. code:: bash

      ./orbit.sh -p source/standalone/workflows/robomimic/play.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --checkpoint /PATH/TO/model.pth

Reinforcement Learning
~~~~~~~~~~~~~~~~~~~~~~

We provide wrappers for different reinforcement learning libraries. These wrappers convert the data from the environments into the respective library's function argument and return types.

- Training an agent with `Stable-Baselines3 <https://stable-baselines3.readthedocs.io/en/master/index.html>`__ on ``Isaac-Cartpole-v0``:

  .. code:: bash

     # install python module (for stable-baselines3)
     ./orbit.sh -e sb3
     # run script for training
     # note: we enable cpu flag since SB3 doesn't optimize for GPU anyway
     ./orbit.sh -p source/standalone/workflows/sb3/train.py --task Isaac-Cartpole-v0 --headless --cpu
     # run script for playing with 32 environments
     ./orbit.sh -p source/standalone/workflows/sb3/play.py --task Isaac-Cartpole-v0 --num_envs 32 --checkpoint /PATH/TO/model.zip

- Training an agent with `SKRL <https://skrl.readthedocs.io>`__ on ``Isaac-Reach-Franka-v0``:

  .. code:: bash

     # install python module (for skrl)
     ./orbit.sh -e skrl
     # run script for training
     ./orbit.sh -p source/standalone/workflows/skrl/train.py --task Isaac-Reach-Franka-v0 --headless
     # run script for playing with 32 environments
     ./orbit.sh -p source/standalone/workflows/skrl/play.py --task Isaac-Reach-Franka-v0 --num_envs 32 --checkpoint /PATH/TO/model.pt

- Training an agent with `RL-Games <https://github.com/Denys88/rl_games>`__ on ``Isaac-Ant-v0``:

  .. code:: bash

     # install python module (for rl-games)
     ./orbit.sh -e rl_games
     # run script for training
     ./orbit.sh -p source/standalone/workflows/rl_games/train.py --task Isaac-Ant-v0 --headless
     # run script for playing with 32 environments
     ./orbit.sh -p source/standalone/workflows/rl_games/play.py --task Isaac-Ant-v0 --num_envs 32 --checkpoint /PATH/TO/model.pth

- Training an agent with `RSL-RL <https://github.com/leggedrobotics/rsl_rl>`__ on ``Isaac-Reach-Franka-v0``:

  .. code:: bash

     # install python module (for rsl-rl)
     ./orbit.sh -e rsl_rl
     # run script for training
     ./orbit.sh -p source/standalone/workflows/rsl_rl/train.py --task Isaac-Reach-Franka-v0 --headless
     # run script for playing with 32 environments
     ./orbit.sh -p source/standalone/workflows/rsl_rl/play.py --task Isaac-Reach-Franka-v0 --num_envs 32 --checkpoint /PATH/TO/model.pth

All the scripts above log the training progress to `Tensorboard`_ in the ``logs`` directory in the root of the repository. The logs directory follows the pattern ``logs/<library>/<task>/<date-time>``, where ``<library>`` is the name of the learning framework, ``<task>`` is the task name, and ``<date-time>`` is the timestamp at which the training script was executed.

To view the logs, run:

.. code:: bash

   # execute from the root directory of the repository
   ./orbit.sh -p -m tensorboard.main --logdir=logs

.. _Tensorboard: https://www.tensorflow.org/tensorboard
NVIDIA-Omniverse/orbit/docs/source/setup/developer.rst
Developer's Guide
=================

For development, we suggest using `Microsoft Visual Studio Code (VSCode) <https://code.visualstudio.com/>`__. This is also suggested by NVIDIA Omniverse, and there exist tutorials on how to `debug Omniverse extensions <https://www.youtube.com/watch?v=Vr1bLtF1f4U&ab_channel=NVIDIAOmniverse>`__ using VSCode.

Setting up Visual Studio Code
-----------------------------

The ``orbit`` repository includes the VSCode settings to easily allow setting up your development environment. These are included in the ``.vscode`` directory and include the following files:

.. code-block:: bash

   .vscode
   ├── tools
   │   ├── launch.template.json
   │   ├── settings.template.json
   │   └── setup_vscode.py
   ├── extensions.json
   ├── launch.json  # <- this is generated by setup_vscode.py
   ├── settings.json  # <- this is generated by setup_vscode.py
   └── tasks.json

To set up the IDE, please follow these instructions:

1. Open the ``orbit`` directory on Visual Studio Code IDE
2. Run VSCode `Tasks <https://code.visualstudio.com/docs/editor/tasks>`__, by pressing ``Ctrl+Shift+P``, selecting ``Tasks: Run Task`` and running the ``setup_python_env`` in the drop down menu.

   .. image:: ../_static/vscode_tasks.png
      :width: 600px
      :align: center
      :alt: VSCode Tasks

If everything executes correctly, it should create a file ``.python.env`` in the ``.vscode`` directory. The file contains the python paths to all the extensions provided by Isaac Sim and Omniverse. This helps in indexing all the python modules for intelligent suggestions while writing code.

For more information on VSCode support for Omniverse, please refer to the following links:

* `Isaac Sim VSCode support <https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/manual_standalone_python.html#isaac-sim-python-vscode>`__
* `Debugging with VSCode <https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_advanced_python_debugging.html>`__

Configuring the python interpreter
----------------------------------

In the provided configuration, we set the default python interpreter to use the python executable provided by Omniverse. This is specified in the ``.vscode/settings.json`` file:

.. code-block:: json

   {
      "python.defaultInterpreterPath": "${workspaceFolder}/_isaac_sim/kit/python/bin/python3",
      "python.envFile": "${workspaceFolder}/.vscode/.python.env",
   }

If you want to use a different python interpreter (for instance, from your conda environment), you need to change the python interpreter used by selecting and activating the python interpreter of your choice in the bottom left corner of VSCode, or by opening the command palette (``Ctrl+Shift+P``) and selecting ``Python: Select Interpreter``.

For more information on how to set the python interpreter for VSCode, please refer to the `VSCode documentation <https://code.visualstudio.com/docs/python/environments#_working-with-python-interpreters>`_.

Repository organization
-----------------------

The ``orbit`` repository is structured as follows:

.. code-block:: bash

   orbit
   ├── .vscode
   ├── .flake8
   ├── LICENSE
   ├── orbit.sh
   ├── pyproject.toml
   ├── README.md
   ├── docs
   ├── source
   │   ├── extensions
   │   │   ├── omni.isaac.orbit
   │   │   └── omni.isaac.orbit_tasks
   │   ├── standalone
   │   │   ├── demos
   │   │   ├── environments
   │   │   ├── tools
   │   │   ├── tutorials
   │   │   └── workflows
   └── VERSION

The ``source`` directory contains the source code for all ``orbit`` *extensions* and *standalone applications*. The two are the different development workflows supported in `Isaac Sim <https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/tutorial_intro_workflows.html>`__. These are described in the following sections.

Extensions
~~~~~~~~~~

Extensions are the recommended way to develop applications in Isaac Sim. They are modular packages that form the Omniverse ecosystem. Each extension provides a set of functionalities that can be used by other extensions or standalone applications. A folder is recognized as an extension if it contains an ``extension.toml`` file in the ``config`` directory. More information on extensions can be found in the `Omniverse documentation <https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/extensions_basic.html>`__.

Orbit in itself provides extensions for robot learning. These are written into the ``source/extensions`` directory. Each extension is written as a python package and follows the structure below:

.. code:: bash

   <extension-name>
   ├── config
   │   └── extension.toml
   ├── docs
   │   ├── CHANGELOG.md
   │   └── README.md
   ├── <extension-name>
   │   ├── __init__.py
   │   ├── ....
   │   └── scripts
   ├── setup.py
   └── tests

The ``config/extension.toml`` file contains the metadata of the extension. This includes the name, version, description, dependencies, etc. This information is used by Omniverse to load the extension. The ``docs`` directory contains the documentation for the extension, with more detailed information about the extension and a CHANGELOG file that contains the changes made to the extension in each version.

The ``<extension-name>`` directory contains the main python package for the extension. It may also contain the ``scripts`` directory for keeping python-based applications that are loaded into Omniverse when the extension is enabled using the `Extension Manager <https://docs.omniverse.nvidia.com/kit/docs/kit-manual/latest/guide/extensions_basic.html>`__.

More specifically, when an extension is enabled, the python module specified in the ``config/extension.toml`` file is loaded and scripts that contain children of the :class:`omni.ext.IExt` class are executed.

.. code:: python

   import omni.ext

   class MyExt(omni.ext.IExt):
      """My extension application."""

      def on_startup(self, ext_id):
         """Called when the extension is loaded."""
         pass

      def on_shutdown(self):
         """Called when the extension is unloaded.

         It releases all references to the extension and cleans up any resources.
         """
         pass

While loading extensions into Omniverse happens automatically, using the python package in standalone applications requires additional steps. To simplify the build process and to avoid the need to understand the `premake <https://premake.github.io/>`__ build system used by Omniverse, we directly use the `setuptools <https://setuptools.readthedocs.io/en/latest/>`__ python package to build the python module provided by the extensions. This is done by the ``setup.py`` file in the extension directory.

.. note::

   The ``setup.py`` file is not required for extensions that are only loaded into Omniverse using the `Extension Manager <https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_extension-manager.html>`__.

Lastly, the ``tests`` directory contains the unit tests for the extension. These are written using the `unittest <https://docs.python.org/3/library/unittest.html>`__ framework.
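A minimal sketch of what such a test module can look like is shown below. It follows the standalone-application pattern described in the next section (launch the app before importing extension modules); the test content itself is purely illustrative.

.. code:: python

   """Minimal test sketch: launch the simulation app before importing extension modules."""

   from omni.isaac.orbit.app import AppLauncher

   # launch omniverse app in headless mode for testing
   app_launcher = AppLauncher(headless=True)
   simulation_app = app_launcher.app

   import unittest


   class TestAppStartup(unittest.TestCase):
       """An illustrative test case."""

       def test_app_is_running(self):
           self.assertTrue(simulation_app.is_running())


   if __name__ == "__main__":
       unittest.main(exit=False)
       # close the app once the tests are done
       simulation_app.close()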
It is important to note that Omniverse also provides a similar `testing framework <https://docs.omniverse.nvidia.com/kit/docs/kit-manual/104.0/guide/testing_exts_python.html>`__. However, it requires going through the build process and does not support testing of the python module in standalone applications.

Standalone applications
~~~~~~~~~~~~~~~~~~~~~~~

In a typical Omniverse workflow, the simulator is launched first, after which the extensions are enabled that load the python module and run the python application. While this is a recommended workflow, it is not always possible to use. For example, for robot learning, it is essential to have complete control over simulation stepping and all the other functionalities, instead of asynchronously waiting for the simulator to step. In such cases, it is necessary to write a standalone application that launches the simulator using :class:`~omni.isaac.orbit.app.AppLauncher` and allows complete control over the simulation through the :class:`~omni.isaac.orbit.sim.SimulationContext` class.

.. code:: python

   """Launch Isaac Sim Simulator first."""

   from omni.isaac.orbit.app import AppLauncher

   # launch omniverse app
   app_launcher = AppLauncher(headless=False)
   simulation_app = app_launcher.app

   """Rest everything follows."""

   from omni.isaac.orbit.sim import SimulationContext

   if __name__ == "__main__":
      # get simulation context
      simulation_context = SimulationContext()
      # reset and play simulation
      simulation_context.reset()
      # step simulation
      simulation_context.step()
      # stop simulation
      simulation_context.stop()

      # close the simulation
      simulation_app.close()

The ``source/standalone`` directory contains various standalone applications designed using the extensions provided by ``orbit``. These applications are written in python and are structured as follows:

* **demos**: Contains various demo applications that showcase the core framework ``omni.isaac.orbit``.
* **environments**: Contains applications for running environments defined in ``omni.isaac.orbit_tasks`` with different agents. These include a random policy, zero-action policy, teleoperation or scripted state machines.
* **tools**: Contains applications for using the tools provided by the framework. These include converting assets, generating datasets, etc.
* **tutorials**: Contains step-by-step tutorials for using the APIs provided by the framework.
* **workflows**: Contains applications for using environments with various learning-based frameworks. These include different reinforcement learning or imitation learning libraries.
NVIDIA-Omniverse/orbit/docs/source/setup/template.rst
Building your Own Project
=========================

Traditionally, building new projects that utilize Orbit's features required creating your own extensions within the Orbit repository. However, this approach can obscure project visibility and complicate updates from one version of Orbit to another. To circumvent these challenges, we now provide a pre-configured and customizable `extension template <https://github.com/isaac-orbit/orbit.ext_template>`_ for creating projects in an isolated environment.

This template serves three distinct use cases:

* **Project Template**: Provides essential access to Isaac Sim and Orbit's features, making it ideal for projects that require a standalone environment.
* **Python Package**: Facilitates integration with Isaac Sim's native or virtual Python environment, allowing for the creation of Python packages that can be shared and reused across multiple projects.
* **Omniverse Extension**: Supports direct integration into the Omniverse extension workflow.

.. note::

   We recommend using the extension template for new projects, as it provides a more streamlined and efficient workflow. Additionally, it ensures that your project remains up-to-date with the latest features and improvements in Orbit.

To get started, please follow the instructions in the `extension template repository <https://github.com/isaac-orbit/orbit.ext_template>`_.
NVIDIA-Omniverse/orbit/docs/source/api/index.rst
API Reference
=============

This page gives an overview of all the modules and classes in the Orbit extensions.

omni.isaac.orbit extension
--------------------------

The following modules are available in the ``omni.isaac.orbit`` extension:

.. currentmodule:: omni.isaac.orbit

.. autosummary::
   :toctree: orbit

   app
   actuators
   assets
   controllers
   devices
   envs
   managers
   markers
   scene
   sensors
   sim
   terrains
   utils

.. toctree::
   :hidden:

   orbit/omni.isaac.orbit.envs.mdp
   orbit/omni.isaac.orbit.envs.ui
   orbit/omni.isaac.orbit.sensors.patterns
   orbit/omni.isaac.orbit.sim.converters
   orbit/omni.isaac.orbit.sim.schemas
   orbit/omni.isaac.orbit.sim.spawners

omni.isaac.orbit_tasks extension
--------------------------------

The following modules are available in the ``omni.isaac.orbit_tasks`` extension:

.. currentmodule:: omni.isaac.orbit_tasks

.. autosummary::
   :toctree: orbit_tasks

   utils

.. toctree::
   :hidden:

   orbit_tasks/omni.isaac.orbit_tasks.utils.wrappers
   orbit_tasks/omni.isaac.orbit_tasks.utils.data_collector
1,096
reStructuredText
17.913793
83
0.67792
NVIDIA-Omniverse/orbit/docs/source/api/orbit_tasks/omni.isaac.orbit_tasks.utils.rst
orbit\_tasks.utils ================== .. automodule:: omni.isaac.orbit_tasks.utils :members: :imported-members: .. rubric:: Submodules .. autosummary:: data_collector wrappers
205
reStructuredText
13.714285
44
0.580488
NVIDIA-Omniverse/orbit/docs/source/api/orbit_tasks/omni.isaac.orbit_tasks.utils.data_collector.rst
orbit\_tasks.utils.data\_collector ================================== .. automodule:: omni.isaac.orbit_tasks.utils.data_collector .. Rubric:: Classes .. autosummary:: RobomimicDataCollector Robomimic Data Collector ------------------------ .. autoclass:: RobomimicDataCollector :members: :show-inheritance:
338
reStructuredText
17.833332
59
0.579882
NVIDIA-Omniverse/orbit/docs/source/api/orbit_tasks/omni.isaac.orbit_tasks.utils.wrappers.rst
orbit\_tasks.utils.wrappers =========================== .. automodule:: omni.isaac.orbit_tasks.utils.wrappers RL-Games Wrapper ---------------- .. automodule:: omni.isaac.orbit_tasks.utils.wrappers.rl_games :members: :show-inheritance: RSL-RL Wrapper -------------- .. automodule:: omni.isaac.orbit_tasks.utils.wrappers.rsl_rl :members: :imported-members: :show-inheritance: SKRL Wrapper ------------ .. automodule:: omni.isaac.orbit_tasks.utils.wrappers.skrl :members: :show-inheritance: Stable-Baselines3 Wrapper ------------------------- .. automodule:: omni.isaac.orbit_tasks.utils.wrappers.sb3 :members: :show-inheritance:
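As a quick orientation for these wrappers, the following is a minimal sketch of wrapping an
environment for Stable-Baselines3. The task name is illustrative, and ``env_cfg`` is a placeholder
for an environment configuration obtained from the task registry:

.. code:: python

   import gymnasium as gym

   from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

   # create a registered environment (task name is illustrative)
   env = gym.make("Isaac-Cartpole-v0", cfg=env_cfg)
   # expose it through the VecEnv interface expected by Stable-Baselines3
   env = Sb3VecEnvWrapper(env)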
665
reStructuredText
18.588235
62
0.62406
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.utils.rst
orbit.utils =========== .. automodule:: omni.isaac.orbit.utils .. Rubric:: Submodules .. autosummary:: io array assets dict math noise string timer warp .. Rubric:: Functions .. autosummary:: configclass Configuration class ~~~~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.configclass :members: :show-inheritance: IO operations ~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.io :members: :imported-members: :show-inheritance: Array operations ~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.array :members: :show-inheritance: Asset operations ~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.assets :members: :show-inheritance: Dictionary operations ~~~~~~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.dict :members: :show-inheritance: Math operations ~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.math :members: :inherited-members: :show-inheritance: Noise operations ~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.noise :members: :imported-members: :inherited-members: :show-inheritance: :exclude-members: __init__, func String operations ~~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.string :members: :show-inheritance: Timer operations ~~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.timer :members: :show-inheritance: Warp operations ~~~~~~~~~~~~~~~ .. automodule:: omni.isaac.orbit.utils.warp :members: :imported-members: :show-inheritance:
1,602
reStructuredText
14.871287
50
0.598627
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.terrains.rst
orbit.terrains
==============

.. automodule:: omni.isaac.orbit.terrains

.. rubric:: Classes

.. autosummary::

   TerrainImporter
   TerrainImporterCfg
   TerrainGenerator
   TerrainGeneratorCfg
   SubTerrainBaseCfg

Terrain importer
----------------

.. autoclass:: TerrainImporter
   :members:
   :show-inheritance:

.. autoclass:: TerrainImporterCfg
   :members:
   :exclude-members: __init__, class_type

Terrain generator
-----------------

.. autoclass:: TerrainGenerator
   :members:

.. autoclass:: TerrainGeneratorCfg
   :members:
   :exclude-members: __init__

.. autoclass:: SubTerrainBaseCfg
   :members:
   :exclude-members: __init__

Height fields
-------------

.. automodule:: omni.isaac.orbit.terrains.height_field

All sub-terrains must inherit from the :class:`HfTerrainBaseCfg` class which contains the common
parameters for all terrains generated from height fields.

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfTerrainBaseCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Random Uniform Terrain
^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.random_uniform_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfRandomUniformTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Pyramid Sloped Terrain
^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.pyramid_sloped_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfPyramidSlopedTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfInvertedPyramidSlopedTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Pyramid Stairs Terrain
^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.pyramid_stairs_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfPyramidStairsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfInvertedPyramidStairsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Discrete Obstacles Terrain
^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.discrete_obstacles_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfDiscreteObstaclesTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Wave Terrain
^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.wave_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfWaveTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Stepping Stones Terrain
^^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.height_field.hf_terrains.stepping_stones_terrain

.. autoclass:: omni.isaac.orbit.terrains.height_field.hf_terrains_cfg.HfSteppingStonesTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Trimesh terrains
----------------

.. automodule:: omni.isaac.orbit.terrains.trimesh

Flat terrain
^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.flat_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshPlaneTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Pyramid terrain
^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.pyramid_stairs_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshPyramidStairsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Inverted pyramid terrain
^^^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.inverted_pyramid_stairs_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshInvertedPyramidStairsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Random grid terrain
^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.random_grid_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRandomGridTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Rails terrain
^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.rails_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRailsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Pit terrain
^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.pit_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshPitTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Box terrain
^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.box_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshBoxTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Gap terrain
^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.gap_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshGapTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Floating ring terrain
^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.floating_ring_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshFloatingRingTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Star terrain
^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.star_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshStarTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Repeated Objects Terrain
^^^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: omni.isaac.orbit.terrains.trimesh.mesh_terrains.repeated_objects_terrain

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRepeatedObjectsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRepeatedPyramidsTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRepeatedBoxesTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

.. autoclass:: omni.isaac.orbit.terrains.trimesh.mesh_terrains_cfg.MeshRepeatedCylindersTerrainCfg
   :members:
   :show-inheritance:
   :exclude-members: __init__, function

Utilities
---------

.. automodule:: omni.isaac.orbit.terrains.utils
   :members:
   :undoc-members:
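To illustrate how these pieces fit together, the following is a minimal sketch (with illustrative
sizes, grid counts, and noise parameters) of composing a generated terrain from a height-field
sub-terrain and importing it into the scene:

.. code:: python

   from omni.isaac.orbit.terrains import TerrainGeneratorCfg, TerrainImporterCfg
   from omni.isaac.orbit.terrains.height_field import HfRandomUniformTerrainCfg

   # generator that tiles a single rough sub-terrain over a 5x5 grid
   ROUGH_TERRAIN_CFG = TerrainGeneratorCfg(
       size=(8.0, 8.0),
       num_rows=5,
       num_cols=5,
       sub_terrains={
           "random_rough": HfRandomUniformTerrainCfg(
               proportion=1.0, noise_range=(0.02, 0.10), noise_step=0.02
           ),
       },
   )
   # importer that procedurally generates the terrain under /World/ground
   terrain_importer_cfg = TerrainImporterCfg(
       prim_path="/World/ground",
       terrain_type="generator",
       terrain_generator=ROUGH_TERRAIN_CFG,
   )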
7,214
reStructuredText
26.538168
103
0.708206
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.sim.rst
orbit.sim ========= .. automodule:: omni.isaac.orbit.sim .. rubric:: Submodules .. autosummary:: converters schemas spawners utils .. rubric:: Classes .. autosummary:: SimulationContext SimulationCfg PhysxCfg .. rubric:: Functions .. autosummary:: simulation_context.build_simulation_context Simulation Context ------------------ .. autoclass:: SimulationContext :members: :show-inheritance: Simulation Configuration ------------------------ .. autoclass:: SimulationCfg :members: :show-inheritance: :exclude-members: __init__ .. autoclass:: PhysxCfg :members: :show-inheritance: :exclude-members: __init__ Simulation Context Builder -------------------------- .. automethod:: simulation_context.build_simulation_context Utilities --------- .. automodule:: omni.isaac.orbit.sim.utils :members: :show-inheritance:
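As a point of reference, the following is a minimal sketch (with an illustrative physics
time-step) of configuring and manually stepping the simulation, assuming the simulator app has
already been launched via :class:`~omni.isaac.orbit.app.AppLauncher`:

.. code:: python

   from omni.isaac.orbit.sim import SimulationCfg, SimulationContext

   # configure the physics time-step (value is illustrative)
   sim_cfg = SimulationCfg(dt=0.01)
   sim = SimulationContext(sim_cfg)

   # start the simulation and step it explicitly
   sim.reset()
   for _ in range(100):
       sim.step()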
897
reStructuredText
13.966666
59
0.626533
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.markers.rst
orbit.markers ============= .. automodule:: omni.isaac.orbit.markers .. rubric:: Classes .. autosummary:: VisualizationMarkers VisualizationMarkersCfg Visualization Markers --------------------- .. autoclass:: VisualizationMarkers :members: :undoc-members: :show-inheritance: .. autoclass:: VisualizationMarkersCfg :members: :exclude-members: __init__
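As a usage sketch (the prim path, marker shape, and counts are illustrative), markers are defined
through a configuration and then drawn each frame by passing batched transforms:

.. code:: python

   import torch

   import omni.isaac.orbit.sim as sim_utils
   from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg

   # one prototype marker named "sphere"; more prototypes can be added to the dict
   marker_cfg = VisualizationMarkersCfg(
       prim_path="/Visuals/myMarkers",
       markers={
           "sphere": sim_utils.SphereCfg(
               radius=0.05,
               visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
           ),
       },
   )
   markers = VisualizationMarkers(marker_cfg)
   # draw one marker per row of the (N, 3) translations tensor
   markers.visualize(translations=torch.zeros(4, 3))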
392
reStructuredText
15.374999
40
0.637755
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.managers.rst
orbit.managers ============== .. automodule:: omni.isaac.orbit.managers .. rubric:: Classes .. autosummary:: SceneEntityCfg ManagerBase ManagerTermBase ManagerTermBaseCfg ObservationManager ObservationGroupCfg ObservationTermCfg ActionManager ActionTerm ActionTermCfg EventManager EventTermCfg CommandManager CommandTerm CommandTermCfg RewardManager RewardTermCfg TerminationManager TerminationTermCfg CurriculumManager CurriculumTermCfg Scene Entity ------------ .. autoclass:: SceneEntityCfg :members: :exclude-members: __init__ Manager Base ------------ .. autoclass:: ManagerBase :members: .. autoclass:: ManagerTermBase :members: .. autoclass:: ManagerTermBaseCfg :members: :exclude-members: __init__ Observation Manager ------------------- .. autoclass:: ObservationManager :members: :inherited-members: :show-inheritance: .. autoclass:: ObservationGroupCfg :members: :exclude-members: __init__ .. autoclass:: ObservationTermCfg :members: :exclude-members: __init__ Action Manager -------------- .. autoclass:: ActionManager :members: :inherited-members: :show-inheritance: .. autoclass:: ActionTerm :members: :inherited-members: :show-inheritance: .. autoclass:: ActionTermCfg :members: :exclude-members: __init__ Event Manager ------------- .. autoclass:: EventManager :members: :inherited-members: :show-inheritance: .. autoclass:: EventTermCfg :members: :exclude-members: __init__ Randomization Manager --------------------- .. deprecated:: v0.3 The Randomization Manager is deprecated and will be removed in v0.4. Please use the :class:`EventManager` class instead. .. autoclass:: RandomizationManager :members: :inherited-members: :show-inheritance: .. autoclass:: RandomizationTermCfg :members: :exclude-members: __init__ Command Manager --------------- .. autoclass:: CommandManager :members: .. autoclass:: CommandTerm :members: :exclude-members: __init__, class_type .. autoclass:: CommandTermCfg :members: :exclude-members: __init__, class_type Reward Manager -------------- .. autoclass:: RewardManager :members: :inherited-members: :show-inheritance: .. autoclass:: RewardTermCfg :exclude-members: __init__ Termination Manager ------------------- .. autoclass:: TerminationManager :members: :inherited-members: :show-inheritance: .. autoclass:: TerminationTermCfg :members: :exclude-members: __init__ Curriculum Manager ------------------ .. autoclass:: CurriculumManager :members: :inherited-members: :show-inheritance: .. autoclass:: CurriculumTermCfg :members: :exclude-members: __init__
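To make the manager-term pattern concrete, the following is a minimal sketch of a reward-term
group as it might appear inside an environment configuration. The term functions, weights, and
joint-name patterns are illustrative:

.. code:: python

   import omni.isaac.orbit.envs.mdp as mdp
   from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
   from omni.isaac.orbit.managers import SceneEntityCfg
   from omni.isaac.orbit.utils import configclass

   @configclass
   class RewardsCfg:
       """Reward terms resolved by the :class:`RewardManager`."""

       # reward staying alive
       alive = RewTerm(func=mdp.is_alive, weight=1.0)
       # penalize joint velocities on the asset named "robot" in the scene
       joint_vel = RewTerm(
           func=mdp.joint_vel_l2,
           weight=-0.01,
           params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"])},
       )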
2,846
reStructuredText
16.574074
72
0.641251
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.sensors.rst
orbit.sensors ============= .. automodule:: omni.isaac.orbit.sensors .. rubric:: Submodules .. autosummary:: patterns .. rubric:: Classes .. autosummary:: SensorBase SensorBaseCfg Camera CameraData CameraCfg ContactSensor ContactSensorData ContactSensorCfg FrameTransformer FrameTransformerData FrameTransformerCfg RayCaster RayCasterData RayCasterCfg RayCasterCamera RayCasterCameraCfg Sensor Base ----------- .. autoclass:: SensorBase :members: .. autoclass:: SensorBaseCfg :members: :exclude-members: __init__, class_type USD Camera ---------- .. autoclass:: Camera :members: :inherited-members: :show-inheritance: .. autoclass:: CameraData :members: :inherited-members: :exclude-members: __init__ .. autoclass:: CameraCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type Contact Sensor -------------- .. autoclass:: ContactSensor :members: :inherited-members: :show-inheritance: .. autoclass:: ContactSensorData :members: :inherited-members: :exclude-members: __init__ .. autoclass:: ContactSensorCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type Frame Transformer ----------------- .. autoclass:: FrameTransformer :members: :inherited-members: :show-inheritance: .. autoclass:: FrameTransformerData :members: :inherited-members: :exclude-members: __init__ .. autoclass:: FrameTransformerCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type .. autoclass:: OffsetCfg :members: :inherited-members: :exclude-members: __init__ Ray-Cast Sensor --------------- .. autoclass:: RayCaster :members: :inherited-members: :show-inheritance: .. autoclass:: RayCasterData :members: :inherited-members: :exclude-members: __init__ .. autoclass:: RayCasterCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type Ray-Cast Camera --------------- .. autoclass:: RayCasterCamera :members: :inherited-members: :show-inheritance: .. autoclass:: RayCasterCameraCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type
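As a configuration sketch (the prim-path pattern and buffer settings are illustrative), a contact
sensor is typically declared on a set of bodies and its readings accessed through its ``data``
attribute once the scene is stepped:

.. code:: python

   from omni.isaac.orbit.sensors import ContactSensorCfg

   # track contacts on all feet of a robot, keeping a short history of readings
   contact_sensor_cfg = ContactSensorCfg(
       prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT",
       update_period=0.0,
       history_length=3,
       track_air_time=True,
   )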
2,409
reStructuredText
16.463768
42
0.635533
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.actuators.rst
orbit.actuators =============== .. automodule:: omni.isaac.orbit.actuators .. rubric:: Classes .. autosummary:: ActuatorBase ActuatorBaseCfg ImplicitActuator ImplicitActuatorCfg IdealPDActuator IdealPDActuatorCfg DCMotor DCMotorCfg ActuatorNetMLP ActuatorNetMLPCfg ActuatorNetLSTM ActuatorNetLSTMCfg Actuator Base ------------- .. autoclass:: ActuatorBase :members: :inherited-members: .. autoclass:: ActuatorBaseCfg :members: :inherited-members: :exclude-members: __init__, class_type Implicit Actuator ----------------- .. autoclass:: ImplicitActuator :members: :inherited-members: :show-inheritance: .. autoclass:: ImplicitActuatorCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type Ideal PD Actuator ----------------- .. autoclass:: IdealPDActuator :members: :inherited-members: :show-inheritance: .. autoclass:: IdealPDActuatorCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type DC Motor Actuator ----------------- .. autoclass:: DCMotor :members: :inherited-members: :show-inheritance: .. autoclass:: DCMotorCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type MLP Network Actuator --------------------- .. autoclass:: ActuatorNetMLP :members: :inherited-members: :show-inheritance: .. autoclass:: ActuatorNetMLPCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type LSTM Network Actuator --------------------- .. autoclass:: ActuatorNetLSTM :members: :inherited-members: :show-inheritance: .. autoclass:: ActuatorNetLSTMCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type
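As a configuration sketch (joint-name patterns, limits, and gains are illustrative), actuator
models are grouped in a dictionary that is assigned to the ``actuators`` field of an
:class:`~omni.isaac.orbit.assets.ArticulationCfg`:

.. code:: python

   from omni.isaac.orbit.actuators import ImplicitActuatorCfg

   actuators = {
       "legs": ImplicitActuatorCfg(
           joint_names_expr=[".*_hip_joint", ".*_knee_joint"],
           effort_limit=80.0,
           velocity_limit=10.0,
           stiffness=40.0,
           damping=2.0,
       ),
   }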
1,831
reStructuredText
16.447619
42
0.663026
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.envs.ui.rst
orbit.envs.ui ============= .. automodule:: omni.isaac.orbit.envs.ui .. rubric:: Classes .. autosummary:: BaseEnvWindow RLTaskEnvWindow ViewportCameraController Base Environment UI ------------------- .. autoclass:: BaseEnvWindow :members: RL Task Environment UI ---------------------- .. autoclass:: RLTaskEnvWindow :members: :show-inheritance: Viewport Camera Controller -------------------------- .. autoclass:: ViewportCameraController :members:
509
reStructuredText
14.9375
40
0.573674
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.devices.rst
orbit.devices
=============

.. automodule:: omni.isaac.orbit.devices

.. rubric:: Classes

.. autosummary::

   DeviceBase
   Se2Gamepad
   Se3Gamepad
   Se2Keyboard
   Se3Keyboard
   Se2SpaceMouse
   Se3SpaceMouse

Device Base
-----------

.. autoclass:: DeviceBase
   :members:

Game Pad
--------

.. autoclass:: Se2Gamepad
   :members:
   :inherited-members:
   :show-inheritance:

.. autoclass:: Se3Gamepad
   :members:
   :inherited-members:
   :show-inheritance:

Keyboard
--------

.. autoclass:: Se2Keyboard
   :members:
   :inherited-members:
   :show-inheritance:

.. autoclass:: Se3Keyboard
   :members:
   :inherited-members:
   :show-inheritance:

Space Mouse
-----------

.. autoclass:: Se2SpaceMouse
   :members:
   :inherited-members:
   :show-inheritance:

.. autoclass:: Se3SpaceMouse
   :members:
   :inherited-members:
   :show-inheritance:
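As a usage sketch (the sensitivity values are illustrative), teleoperation devices are polled
once per simulation step for their current command:

.. code:: python

   from omni.isaac.orbit.devices import Se3Keyboard

   # keyboard teleoperation producing 6-DoF end-effector deltas
   teleop = Se3Keyboard(pos_sensitivity=0.05, rot_sensitivity=0.5)
   teleop.reset()

   # inside the simulation loop: a 6-DoF delta pose and a gripper command
   delta_pose, gripper_command = teleop.advance()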
893
reStructuredText
13.419355
40
0.62262
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.app.rst
orbit.app
=========

.. automodule:: omni.isaac.orbit.app

.. rubric:: Classes

.. autosummary::

   AppLauncher

Environment variables
---------------------

The following details the behavior of the class based on the environment variables:

* **Headless mode**: If the environment variable ``HEADLESS=1``, then SimulationApp will be
  started in headless mode. If ``LIVESTREAM={1,2,3}``, then it will supersede the ``HEADLESS``
  envvar and force headlessness.

  * ``HEADLESS=1`` causes the app to run in headless mode.

* **Livestreaming**: If the environment variable ``LIVESTREAM={1,2,3}``, then `livestream`_ is
  enabled. Any of the livestream modes being true forces the app to run in headless mode.

  * ``LIVESTREAM=1`` enables streaming via the Isaac `Native Livestream`_ extension. This allows
    users to connect through the Omniverse Streaming Client.
  * ``LIVESTREAM=2`` enables streaming via the `Websocket Livestream`_ extension. This allows
    users to connect in a browser using the WebSocket protocol.
  * ``LIVESTREAM=3`` enables streaming via the `WebRTC Livestream`_ extension. This allows users
    to connect in a browser using the WebRTC protocol.

* **Offscreen Render**: If the environment variable ``OFFSCREEN_RENDER`` is set to 1, then the
  offscreen-render pipeline is enabled. This is useful for running the simulator without a GUI
  but still rendering the viewport and camera images.

  * ``OFFSCREEN_RENDER=1``: Enables the offscreen-render pipeline which allows users to render
    the scene without launching a GUI.

.. note::

   The off-screen rendering pipeline only works when used in conjunction with the
   :class:`omni.isaac.orbit.sim.SimulationContext` class. This is because the off-screen
   rendering pipeline enables flags that are internally used by the SimulationContext class.

To set the environment variables, one can use the following command in the terminal:

.. code:: bash

   export LIVESTREAM=3
   export OFFSCREEN_RENDER=1
   # run the python script
   ./orbit.sh -p source/standalone/demo/play_quadrupeds.py

Alternatively, one can set the environment variables directly when invoking the python script:

.. code:: bash

   LIVESTREAM=3 OFFSCREEN_RENDER=1 ./orbit.sh -p source/standalone/demo/play_quadrupeds.py

Overriding the environment variables
------------------------------------

The environment variables can be overridden in the python script itself using the
:class:`AppLauncher`. These can be passed as a dictionary, a :class:`argparse.Namespace` object,
or as keyword arguments. When the passed arguments are not the default values, then they override
the environment variables.

The following snippet shows how to use the :class:`AppLauncher` in different ways:

.. code:: python

   import argparse

   from omni.isaac.orbit.app import AppLauncher

   # add argparse arguments
   parser = argparse.ArgumentParser()
   # add your own arguments
   # ....
   # add app launcher arguments for cli
   AppLauncher.add_app_launcher_args(parser)
   # parse arguments
   args = parser.parse_args()

   # launch omniverse isaac-sim app
   # -- Option 1: Pass the settings as a Namespace object
   app_launcher = AppLauncher(args)
   # -- Option 2: Pass the settings as keywords arguments
   app_launcher = AppLauncher(headless=args.headless, livestream=args.livestream)
   # -- Option 3: Pass the settings as a dictionary
   app_launcher = AppLauncher(vars(args))
   # -- Option 4: Pass no settings
   app_launcher = AppLauncher()

   # obtain the launched app
   simulation_app = app_launcher.app

Simulation App Launcher
-----------------------

.. autoclass:: AppLauncher
   :members:

.. _livestream: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/manual_livestream_clients.html
.. _`Native Livestream`: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/manual_livestream_clients.html#isaac-sim-setup-kit-remote
.. _`Websocket Livestream`: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/manual_livestream_clients.html#isaac-sim-setup-livestream-websocket
.. _`WebRTC Livestream`: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/manual_livestream_clients.html#isaac-sim-setup-livestream-webrtc
4,248
reStructuredText
36.9375
152
0.734228
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.sim.converters.rst
orbit.sim.converters ==================== .. automodule:: omni.isaac.orbit.sim.converters .. rubric:: Classes .. autosummary:: AssetConverterBase AssetConverterBaseCfg MeshConverter MeshConverterCfg UrdfConverter UrdfConverterCfg Asset Converter Base -------------------- .. autoclass:: AssetConverterBase :members: .. autoclass:: AssetConverterBaseCfg :members: :exclude-members: __init__ Mesh Converter -------------- .. autoclass:: MeshConverter :members: :inherited-members: :show-inheritance: .. autoclass:: MeshConverterCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__ URDF Converter -------------- .. autoclass:: UrdfConverter :members: :inherited-members: :show-inheritance: .. autoclass:: UrdfConverterCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__
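As a usage sketch (all paths are placeholders), converters take a configuration describing the
source asset and produce a USD file whose location is exposed on the converter instance:

.. code:: python

   from omni.isaac.orbit.sim.converters import MeshConverter, MeshConverterCfg

   mesh_converter_cfg = MeshConverterCfg(
       asset_path="/path/to/mesh.obj",
       usd_dir="/path/to/output",
       usd_file_name="mesh.usd",
   )
   # convert the mesh and report where the USD file was written
   converter = MeshConverter(mesh_converter_cfg)
   print(converter.usd_path)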
933
reStructuredText
15.981818
47
0.633441
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.sim.spawners.rst
orbit.sim.spawners ================== .. automodule:: omni.isaac.orbit.sim.spawners .. rubric:: Submodules .. autosummary:: shapes lights sensors from_files materials .. rubric:: Classes .. autosummary:: SpawnerCfg RigidObjectSpawnerCfg Spawners -------- .. autoclass:: SpawnerCfg :members: :exclude-members: __init__ .. autoclass:: RigidObjectSpawnerCfg :members: :show-inheritance: :exclude-members: __init__ Shapes ------ .. automodule:: omni.isaac.orbit.sim.spawners.shapes .. rubric:: Classes .. autosummary:: ShapeCfg CapsuleCfg ConeCfg CuboidCfg CylinderCfg SphereCfg .. autoclass:: ShapeCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_capsule .. autoclass:: CapsuleCfg :members: :show-inheritance: :exclude-members: __init__, func .. autofunction:: spawn_cone .. autoclass:: ConeCfg :members: :show-inheritance: :exclude-members: __init__, func .. autofunction:: spawn_cuboid .. autoclass:: CuboidCfg :members: :show-inheritance: :exclude-members: __init__, func .. autofunction:: spawn_cylinder .. autoclass:: CylinderCfg :members: :show-inheritance: :exclude-members: __init__, func .. autofunction:: spawn_sphere .. autoclass:: SphereCfg :members: :show-inheritance: :exclude-members: __init__, func Lights ------ .. automodule:: omni.isaac.orbit.sim.spawners.lights .. rubric:: Classes .. autosummary:: LightCfg CylinderLightCfg DiskLightCfg DistantLightCfg DomeLightCfg SphereLightCfg .. autofunction:: spawn_light .. autoclass:: LightCfg :members: :exclude-members: __init__, func .. autoclass:: CylinderLightCfg :members: :exclude-members: __init__, func .. autoclass:: DiskLightCfg :members: :exclude-members: __init__, func .. autoclass:: DistantLightCfg :members: :exclude-members: __init__, func .. autoclass:: DomeLightCfg :members: :exclude-members: __init__, func .. autoclass:: SphereLightCfg :members: :exclude-members: __init__, func Sensors ------- .. automodule:: omni.isaac.orbit.sim.spawners.sensors .. rubric:: Classes .. autosummary:: PinholeCameraCfg FisheyeCameraCfg .. autofunction:: spawn_camera .. autoclass:: PinholeCameraCfg :members: :exclude-members: __init__, func .. autoclass:: FisheyeCameraCfg :members: :exclude-members: __init__, func From Files ---------- .. automodule:: omni.isaac.orbit.sim.spawners.from_files .. rubric:: Classes .. autosummary:: UrdfFileCfg UsdFileCfg GroundPlaneCfg .. autofunction:: spawn_from_urdf .. autoclass:: UrdfFileCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_from_usd .. autoclass:: UsdFileCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_ground_plane .. autoclass:: GroundPlaneCfg :members: :exclude-members: __init__, func Materials --------- .. automodule:: omni.isaac.orbit.sim.spawners.materials .. rubric:: Classes .. autosummary:: VisualMaterialCfg PreviewSurfaceCfg MdlFileCfg GlassMdlCfg PhysicsMaterialCfg RigidBodyMaterialCfg Visual Materials ~~~~~~~~~~~~~~~~ .. autoclass:: VisualMaterialCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_preview_surface .. autoclass:: PreviewSurfaceCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_from_mdl_file .. autoclass:: MdlFileCfg :members: :exclude-members: __init__, func .. autoclass:: GlassMdlCfg :members: :exclude-members: __init__, func Physical Materials ~~~~~~~~~~~~~~~~~~ .. autoclass:: PhysicsMaterialCfg :members: :exclude-members: __init__, func .. autofunction:: spawn_rigid_body_material .. autoclass:: RigidBodyMaterialCfg :members: :exclude-members: __init__, func
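As a usage sketch, every spawner configuration carries a ``func`` handle that performs the actual
spawning; calling it with a prim path (the path and dimensions below are illustrative) creates
the prim on the stage:

.. code:: python

   import omni.isaac.orbit.sim as sim_utils

   # configure a green cone and spawn it one meter above the origin
   cone_cfg = sim_utils.ConeCfg(
       radius=0.1,
       height=0.2,
       visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
   )
   cone_cfg.func("/World/Objects/Cone", cone_cfg, translation=(0.0, 0.0, 1.0))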
3,974
reStructuredText
15.772152
56
0.642929
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.controllers.rst
orbit.controllers ================= .. automodule:: omni.isaac.orbit.controllers .. rubric:: Classes .. autosummary:: DifferentialIKController DifferentialIKControllerCfg Differential Inverse Kinematics ------------------------------- .. autoclass:: DifferentialIKController :members: :inherited-members: :show-inheritance: .. autoclass:: DifferentialIKControllerCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type
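As a usage sketch (the batch size, joint count, and zero-valued tensors are placeholders for
quantities read from the robot), the controller is configured once and then queried inside the
control loop:

.. code:: python

   import torch

   from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg

   # absolute pose commands solved with damped least-squares
   ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
   ik_controller = DifferentialIKController(ik_cfg, num_envs=1, device="cpu")

   # placeholder robot state: end-effector pose in the base frame, jacobian, joint positions
   num_joints = 7
   ee_pos_b = torch.zeros(1, 3)
   ee_quat_b = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
   jacobian = torch.zeros(1, 6, num_joints)
   joint_pos = torch.zeros(1, num_joints)

   # desired pose command: position (3) followed by a unit quaternion (w, x, y, z)
   command = torch.tensor([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])
   ik_controller.set_command(command)
   joint_pos_des = ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)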
503
reStructuredText
18.384615
44
0.650099
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.envs.rst
orbit.envs ========== .. automodule:: omni.isaac.orbit.envs .. rubric:: Submodules .. autosummary:: mdp ui .. rubric:: Classes .. autosummary:: BaseEnv BaseEnvCfg ViewerCfg RLTaskEnv RLTaskEnvCfg Base Environment ---------------- .. autoclass:: BaseEnv :members: .. autoclass:: BaseEnvCfg :members: :exclude-members: __init__, class_type .. autoclass:: ViewerCfg :members: :exclude-members: __init__ RL Task Environment ------------------- .. autoclass:: RLTaskEnv :members: :inherited-members: :show-inheritance: .. autoclass:: RLTaskEnvCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type
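As a usage sketch (``env_cfg`` is a placeholder for an :class:`RLTaskEnvCfg` instance defined
elsewhere), the RL environment follows the ``gymnasium``-style stepping interface:

.. code:: python

   import torch

   from omni.isaac.orbit.envs import RLTaskEnv

   env = RLTaskEnv(cfg=env_cfg)
   obs, extras = env.reset()
   for _ in range(100):
       # random actions with the shape expected by the action manager
       actions = torch.rand_like(env.action_manager.action)
       obs, rew, terminated, truncated, extras = env.step(actions)
   env.close()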
729
reStructuredText
13.6
42
0.593964
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.scene.rst
orbit.scene
===========

.. automodule:: omni.isaac.orbit.scene

.. rubric:: Classes

.. autosummary::

   InteractiveScene
   InteractiveSceneCfg

Interactive Scene
-----------------

.. autoclass:: InteractiveScene
   :members:
   :undoc-members:
   :show-inheritance:

.. autoclass:: InteractiveSceneCfg
   :members:
   :exclude-members: __init__
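As a usage sketch (the asset and spacing values are illustrative), a scene is declared as a
configuration class whose fields are the entities to create, and is then replicated across
environments:

.. code:: python

   import omni.isaac.orbit.sim as sim_utils
   from omni.isaac.orbit.assets import AssetBaseCfg
   from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
   from omni.isaac.orbit.utils import configclass

   @configclass
   class MySceneCfg(InteractiveSceneCfg):
       """A scene with only a ground plane."""

       ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

   # clone the scene for 4 environments spaced 2.5 m apart
   scene = InteractiveScene(MySceneCfg(num_envs=4, env_spacing=2.5))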
362
reStructuredText
14.124999
38
0.624309
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.sensors.patterns.rst
orbit.sensors.patterns ====================== .. automodule:: omni.isaac.orbit.sensors.patterns .. rubric:: Classes .. autosummary:: PatternBaseCfg GridPatternCfg PinholeCameraPatternCfg BpearlPatternCfg Pattern Base ------------ .. autoclass:: PatternBaseCfg :members: :inherited-members: :exclude-members: __init__ Grid Pattern ------------ .. autofunction:: omni.isaac.orbit.sensors.patterns.grid_pattern .. autoclass:: GridPatternCfg :members: :inherited-members: :exclude-members: __init__, func Pinhole Camera Pattern ---------------------- .. autofunction:: omni.isaac.orbit.sensors.patterns.pinhole_camera_pattern .. autoclass:: PinholeCameraPatternCfg :members: :inherited-members: :exclude-members: __init__, func RS-Bpearl Pattern ----------------- .. autofunction:: omni.isaac.orbit.sensors.patterns.bpearl_pattern .. autoclass:: BpearlPatternCfg :members: :inherited-members: :exclude-members: __init__, func
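As a configuration sketch (resolution, size, and prim paths are illustrative), a pattern
configuration is typically attached to a ray-cast sensor to define where its rays are sampled:

.. code:: python

   from omni.isaac.orbit.sensors import RayCasterCfg
   from omni.isaac.orbit.sensors.patterns import GridPatternCfg

   # a height scanner casting a 1.6 m x 1.0 m grid of rays at 10 cm resolution
   height_scanner_cfg = RayCasterCfg(
       prim_path="{ENV_REGEX_NS}/Robot/base",
       attach_yaw_only=True,
       pattern_cfg=GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
       mesh_prim_paths=["/World/ground"],
   )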
1,006
reStructuredText
18.365384
74
0.649105
NVIDIA-Omniverse/orbit/docs/source/api/orbit/omni.isaac.orbit.assets.rst
orbit.assets ============ .. automodule:: omni.isaac.orbit.assets .. rubric:: Classes .. autosummary:: AssetBase AssetBaseCfg RigidObject RigidObjectData RigidObjectCfg Articulation ArticulationData ArticulationCfg .. currentmodule:: omni.isaac.orbit.assets Asset Base ---------- .. autoclass:: AssetBase :members: .. autoclass:: AssetBaseCfg :members: :exclude-members: __init__, class_type Rigid Object ------------ .. autoclass:: RigidObject :members: :inherited-members: :show-inheritance: .. autoclass:: RigidObjectData :members: :inherited-members: :show-inheritance: :exclude-members: __init__ .. autoclass:: RigidObjectCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type Articulation ------------ .. autoclass:: Articulation :members: :inherited-members: :show-inheritance: .. autoclass:: ArticulationData :members: :inherited-members: :show-inheritance: :exclude-members: __init__ .. autoclass:: ArticulationCfg :members: :inherited-members: :show-inheritance: :exclude-members: __init__, class_type
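As a usage sketch (the prim path, dimensions, and initial pose are illustrative), an asset is
declared by pairing a spawner configuration with an initial state and wrapping it in the asset
class:

.. code:: python

   import omni.isaac.orbit.sim as sim_utils
   from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg

   # a 20 cm rigid cube that starts one meter above the ground
   cube_cfg = RigidObjectCfg(
       prim_path="/World/Cube",
       spawn=sim_utils.CuboidCfg(
           size=(0.2, 0.2, 0.2),
           rigid_props=sim_utils.RigidBodyPropertiesCfg(),
           mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
           collision_props=sim_utils.CollisionPropertiesCfg(),
       ),
       init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)),
   )
   cube = RigidObject(cfg=cube_cfg)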
1,202
reStructuredText
16.185714
42
0.639767