NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/classic/humanoid/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import SceneEntityCfg if TYPE_CHECKING: from omni.isaac.orbit.envs import BaseEnv def base_yaw_roll(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Yaw and roll of the base in the simulation world frame.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # extract euler angles (in world frame) roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) # normalize angle to [-pi, pi] roll = torch.atan2(torch.sin(roll), torch.cos(roll)) yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw)) return torch.cat((yaw.unsqueeze(-1), roll.unsqueeze(-1)), dim=-1) def base_up_proj(env: BaseEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Projection of the base up vector onto the world up vector.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute base up vector base_up_vec = math_utils.quat_rotate(asset.data.root_quat_w, -asset.GRAVITY_VEC_W) return base_up_vec[:, 2].unsqueeze(-1) def base_heading_proj( env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Projection of the base forward vector onto the world forward vector.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] to_target_pos[:, 2] = 0.0 to_target_dir = math_utils.normalize(to_target_pos) # compute base forward vector heading_vec = math_utils.quat_rotate(asset.data.root_quat_w, asset.FORWARD_VEC_B) # compute dot product between heading and target direction heading_proj = torch.bmm(heading_vec.view(env.num_envs, 1, 3), to_target_dir.view(env.num_envs, 3, 1)) return heading_proj.view(env.num_envs, 1) def base_angle_to_target( env: BaseEnv, target_pos: tuple[float, float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Angle between the base forward vector and the vector to the target.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] walk_target_angle = torch.atan2(to_target_pos[:, 1], to_target_pos[:, 0]) # compute base forward vector _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) # normalize angle to target to [-pi, pi] angle_to_target = walk_target_angle - yaw angle_to_target = torch.atan2(torch.sin(angle_to_target), torch.cos(angle_to_target)) return angle_to_target.unsqueeze(-1)
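The observation terms in observations.py above reduce to plain tensor arithmetic once the articulation data is available. Below is a standalone, torch-only sketch of the angle wrap-to-[-pi, pi] normalization and the batched heading projection; the yaw/roll and forward-vector values are made up for illustration and stand in for the quantities the orbit math utilities would provide.

# Minimal sketch of the angle normalization and heading-projection math used above (illustrative values).
import torch

num_envs = 4
yaw = torch.tensor([0.1, 3.5, -4.0, 6.4])    # radians, possibly outside [-pi, pi]
roll = torch.tensor([0.0, -3.2, 2.9, 0.5])

# wrap angles to [-pi, pi], exactly as in base_yaw_roll()
yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw))
roll = torch.atan2(torch.sin(roll), torch.cos(roll))
obs_yaw_roll = torch.cat((yaw.unsqueeze(-1), roll.unsqueeze(-1)), dim=-1)  # (num_envs, 2)

# heading projection: dot product between the base forward vector and the unit
# direction to the target, batched with torch.bmm as in base_heading_proj()
heading_vec = torch.tensor([[1.0, 0.0, 0.0]]).repeat(num_envs, 1)   # assumed forward vectors (world frame)
to_target = torch.tensor([[2.0, 1.0, 0.0]]).repeat(num_envs, 1)     # target offset with z zeroed out
to_target_dir = to_target / to_target.norm(dim=-1, keepdim=True)
heading_proj = torch.bmm(heading_vec.view(num_envs, 1, 3), to_target_dir.view(num_envs, 3, 1)).view(num_envs, 1)
print(obs_yaw_roll.shape, heading_proj.shape)  # torch.Size([4, 2]) torch.Size([4, 1])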
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments for fixed-arm robots.""" from .reach import * # noqa
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/reach_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import ActionTermCfg as ActionTerm from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp ## # Scene definition ## @configclass class ReachSceneCfg(InteractiveSceneCfg): """Configuration for the scene with a robotic arm.""" # world ground = AssetBaseCfg( prim_path="/World/ground", spawn=sim_utils.GroundPlaneCfg(), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)), ) table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", ), init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.70711, 0.0, 0.0, 0.70711)), ) # robots robot: ArticulationCfg = MISSING # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=2500.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" ee_pose = mdp.UniformPoseCommandCfg( asset_name="robot", body_name=MISSING, resampling_time_range=(4.0, 4.0), debug_vis=True, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.35, 0.65), pos_y=(-0.2, 0.2), pos_z=(0.15, 0.5), roll=(0.0, 0.0), pitch=MISSING, # depends on end-effector axis yaw=(-3.14, 3.14), ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" arm_action: ActionTerm = MISSING gripper_action: ActionTerm | None = None @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # observation terms (order preserved) joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"}) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" reset_robot_joints = EventTerm( func=mdp.reset_joints_by_scale, mode="reset", params={ "position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # task terms end_effector_position_tracking = RewTerm( func=mdp.position_command_error, weight=-0.2, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) 
end_effector_orientation_tracking = RewTerm( func=mdp.orientation_command_error, weight=-0.05, params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, ) # action penalty action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001) joint_vel = RewTerm( func=mdp.joint_vel_l2, weight=-0.0001, params={"asset_cfg": SceneEntityCfg("robot")}, ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" action_rate = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500} ) ## # Environment configuration ## @configclass class ReachEnvCfg(RLTaskEnvCfg): """Configuration for the reach end-effector pose tracking environment.""" # Scene settings scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 self.episode_length_s = 12.0 self.viewer.eye = (3.5, 3.5, 3.5) # simulation settings self.sim.dt = 1.0 / 60.0
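For reference, the timing values set in ReachEnvCfg.__post_init__ determine the control rate and the episode length in policy steps. A quick back-of-the-envelope check in plain Python, using the values from the config above:

# Derived timing for ReachEnvCfg (values copied from __post_init__ above).
sim_dt = 1.0 / 60.0          # physics step [s]
decimation = 2               # physics steps per policy step
episode_length_s = 12.0

control_dt = sim_dt * decimation                  # 1/30 s -> the policy runs at 30 Hz
steps_per_episode = int(episode_length_s / control_dt)
print(control_dt, steps_per_episode)              # 0.0333..., 360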
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Fixed-arm environments with end-effector pose tracking commands."""
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the reach environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.utils.math import combine_frame_transforms, quat_error_magnitude, quat_mul if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def position_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking of the position error using L2-norm. The function computes the position error between the desired position (from the command) and the current position of the asset's body (in world frame). The position error is computed as the L2-norm of the difference between the desired and current positions. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] des_pos_w, _ = combine_frame_transforms(asset.data.root_state_w[:, :3], asset.data.root_state_w[:, 3:7], des_pos_b) curr_pos_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], :3] # type: ignore return torch.norm(curr_pos_w - des_pos_w, dim=1) def orientation_command_error(env: RLTaskEnv, command_name: str, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize tracking orientation error using shortest path. The function computes the orientation error between the desired orientation (from the command) and the current orientation of the asset's body (in world frame). The orientation error is computed as the shortest path between the desired and current orientations. """ # extract the asset (to enable type hinting) asset: RigidObject = env.scene[asset_cfg.name] command = env.command_manager.get_command(command_name) # obtain the desired and current orientations des_quat_b = command[:, 3:7] des_quat_w = quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b) curr_quat_w = asset.data.body_state_w[:, asset_cfg.body_ids[0], 3:7] # type: ignore return quat_error_magnitude(curr_quat_w, des_quat_w)
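Both reward terms above are thin wrappers around quaternion math. The following self-contained sketch reproduces the same ideas on raw tensors: the desired position is transformed from the robot base frame to the world frame, and the orientation error is the shortest rotation angle between two quaternions. Quaternions are in (w, x, y, z) order; the helper functions and numeric values are local stand-ins for illustration, not the orbit API.

import torch

def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    """Hamilton product of (w, x, y, z) quaternions, batched."""
    w1, x1, y1, z1 = q1.unbind(-1)
    w2, x2, y2, z2 = q2.unbind(-1)
    return torch.stack((
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ), dim=-1)

def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate vectors v by quaternions q (w, x, y, z)."""
    q_vec, w = q[..., 1:], q[..., :1]
    t = 2.0 * torch.cross(q_vec, v, dim=-1)
    return v + w * t + torch.cross(q_vec, t, dim=-1)

def quat_conjugate(q: torch.Tensor) -> torch.Tensor:
    return torch.cat((q[..., :1], -q[..., 1:]), dim=-1)

# desired position given in the robot base frame -> world frame (the role of combine_frame_transforms)
root_pos_w = torch.tensor([[0.0, 0.0, 0.0]])
root_quat_w = torch.tensor([[1.0, 0.0, 0.0, 0.0]])   # identity root orientation
des_pos_b = torch.tensor([[0.5, 0.0, 0.3]])
des_pos_w = root_pos_w + quat_rotate(root_quat_w, des_pos_b)

curr_pos_w = torch.tensor([[0.45, 0.05, 0.25]])
position_error = torch.norm(curr_pos_w - des_pos_w, dim=1)   # L2 position error, as in position_command_error

# orientation error as the shortest rotation angle between desired and current quaternions
des_quat_w = torch.tensor([[0.7071, 0.0, 0.7071, 0.0]])      # 90 deg about y
curr_quat_w = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
q_err = quat_mul(curr_quat_w, quat_conjugate(des_quat_w))
angle_error = 2.0 * torch.atan2(q_err[..., 1:].norm(dim=-1), q_err[..., 0].abs())
print(position_error, angle_error)   # ~0.0866 m, ~1.5708 rad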
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for arm-based reach-tracking environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaReachEnvCfg(joint_pos_env_cfg.FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Reach-Franka-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) gym.register( id="Isaac-Reach-Franka-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Abs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Reach-Franka-IK-Abs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Reach-Franka-IK-Rel-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Reach-Franka-IK-Rel-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:FrankaReachPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, )
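The registrations above store the environment and agent configurations as gym kwargs so that downstream scripts can look them up by task name without instantiating anything. A toy illustration of the same mechanism using gymnasium alone; the env id, placeholder entry point, and config class here are made up for the example, since the real entries require Isaac Sim to be importable.

import gymnasium as gym

class DummyReachCfg:
    """Stand-in for an env cfg class; purely illustrative."""
    num_envs = 4096

# mimic the pattern used above: configs are attached as kwargs at registration time
gym.register(
    id="Example-Reach-v0",
    entry_point="gymnasium.envs.classic_control:PendulumEnv",  # placeholder entry point, never instantiated here
    disable_env_checker=True,
    kwargs={"env_cfg_entry_point": DummyReachCfg},
)

# downstream code can recover the config from the registry without creating the env
spec = gym.spec("Example-Reach-v0")
env_cfg_cls = spec.kwargs["env_cfg_entry_point"]
print(env_cfg_cls.num_envs)  # 4096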
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets import FRANKA_PANDA_CFG # isort: skip ## # Environment configuration ## @configclass class FrankaReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to franka self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["panda_hand"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["panda_hand"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along z-direction self.commands.ee_pose.body_name = "panda_hand" self.commands.ee_pose.ranges.pitch = (math.pi, math.pi) @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class FrankaReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "franka_reach" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [64, 64] hidden_activation: ["elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent: rollouts: 24 learning_epochs: 8 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "franka_reach" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html trainer: timesteps: 24000
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_franka env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
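As a sanity check on the PPO batch sizes in this rl_games config: with the default scene of 4096 environments (see ReachEnvCfg above, injected at runtime as num_actors) and horizon_length 24, the rollout buffer divides evenly into the configured minibatch_size.

# Rollout/minibatch bookkeeping for the rl_games PPO config above.
num_envs = 4096            # scene default from ReachEnvCfg; rl_games receives it as num_actors
horizon_length = 24
minibatch_size = 24576
mini_epochs = 5

batch_size = num_envs * horizon_length            # 98304 transitions per update
num_minibatches = batch_size // minibatch_size    # 4 minibatches per epoch
assert batch_size % minibatch_size == 0
print(batch_size, num_minibatches, num_minibatches * mini_epochs)  # 98304 4 20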
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, joint_pos_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Reach-UR10-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", }, ) gym.register( id="Isaac-Reach-UR10-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.UR10ReachEnvCfg_PLAY, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp from omni.isaac.orbit_tasks.manipulation.reach.reach_env_cfg import ReachEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets import UR10_CFG # isort: skip ## # Environment configuration ## @configclass class UR10ReachEnvCfg(ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to ur10 self.scene.robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # override events self.events.reset_robot_joints.params["position_range"] = (0.75, 1.25) # override rewards self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["ee_link"] self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["ee_link"] # override actions self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True ) # override command generator body # end-effector is along x-direction self.commands.ee_pose.body_name = "ee_link" self.commands.ee_pose.ranges.pitch = (math.pi / 2, math.pi / 2) @configclass class UR10ReachEnvCfg_PLAY(UR10ReachEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
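The main task-level difference from the Franka variant is the commanded pitch: pi for Franka versus pi/2 for UR10, because the two end-effectors approach along different body axes (z versus x, per the comments above). A small torch-only check of what those pitch values do to the respective tool axes; this is an illustrative simplification that only considers a rotation about the world y-axis and ignores roll/yaw, which the command samples around zero and over the full range respectively.

import math
import torch

def rot_y(pitch: float) -> torch.Tensor:
    """Rotation matrix for a rotation of `pitch` radians about the y-axis."""
    c, s = math.cos(pitch), math.sin(pitch)
    return torch.tensor([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])

ee_z_axis = torch.tensor([0.0, 0.0, 1.0])   # Franka: end-effector points along its z-axis
ee_x_axis = torch.tensor([1.0, 0.0, 0.0])   # UR10: end-effector points along its x-axis

print(rot_y(math.pi) @ ee_z_axis)        # ~[0, 0, -1]: Franka tool axis points down toward the table
print(rot_y(math.pi / 2) @ ee_x_axis)    # ~[0, 0, -1]: UR10 tool axis points down as well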
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rsl_rl_ppo_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UR10ReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1000 save_interval = 50 experiment_name = "reach_ur10" run_name = "" resume = False empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[64, 64], critic_hidden_dims=[64, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=8, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/config/ur_10/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [64, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach_ur10 env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: True num_actors: -1 reward_shaper: scale_value: 1.0 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 1e-3 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.01 score_to_win: 10000 max_epochs: 1000 save_best_after: 200 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.01 truncate_grads: True e_clip: 0.2 horizon_length: 24 minibatch_size: 24576 mini_epochs: 5 critic_coef: 2 clip_value: True clip_actions: False bounds_loss_coef: 0.0001
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/cabinet_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2022-2023, The ORBIT Project Developers. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.actuators.actuator_cfg import ImplicitActuatorCfg from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.sensors import FrameTransformerCfg from omni.isaac.orbit.sensors.frame_transformer import OffsetCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from . import mdp ## # Pre-defined configs ## from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG # isort: skip FRAME_MARKER_SMALL_CFG = FRAME_MARKER_CFG.copy() FRAME_MARKER_SMALL_CFG.markers["frame"].scale = (0.10, 0.10, 0.10) ## # Scene definition ## @configclass class CabinetSceneCfg(InteractiveSceneCfg): """Configuration for the cabinet scene with a robot and a cabinet. This is the abstract base implementation, the exact scene is defined in the derived classes which need to set the robot and end-effector frames """ # robots, Will be populated by agent env cfg robot: ArticulationCfg = MISSING # End-effector, Will be populated by agent env cfg ee_frame: FrameTransformerCfg = MISSING cabinet = ArticulationCfg( prim_path="{ENV_REGEX_NS}/Cabinet", spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd", activate_contact_sensors=False, ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.8, 0, 0.4), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={ "door_left_joint": 0.0, "door_right_joint": 0.0, "drawer_bottom_joint": 0.0, "drawer_top_joint": 0.0, }, ), actuators={ "drawers": ImplicitActuatorCfg( joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"], effort_limit=87.0, velocity_limit=100.0, stiffness=10.0, damping=1.0, ), "doors": ImplicitActuatorCfg( joint_names_expr=["door_left_joint", "door_right_joint"], effort_limit=87.0, velocity_limit=100.0, stiffness=10.0, damping=2.5, ), }, ) # Frame definitions for the cabinet. 
cabinet_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Cabinet/sektion", debug_vis=True, visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/CabinetFrameTransformer"), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Cabinet/drawer_handle_top", name="drawer_handle_top", offset=OffsetCfg( pos=(0.305, 0.0, 0.01), rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame ), ), ], ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(), spawn=sim_utils.GroundPlaneCfg(), collision_group=-1, ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" null_command = mdp.NullCommandCfg() @configclass class ActionsCfg: """Action specifications for the MDP.""" body_joint_pos: mdp.JointPositionActionCfg = MISSING finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" joint_pos = ObsTerm(func=mdp.joint_pos_rel) joint_vel = ObsTerm(func=mdp.joint_vel_rel) cabinet_joint_pos = ObsTerm( func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) cabinet_joint_vel = ObsTerm( func=mdp.joint_vel_rel, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) rel_ee_drawer_distance = ObsTerm(func=mdp.rel_ee_drawer_distance) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.8, 1.25), "dynamic_friction_range": (0.8, 1.25), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) cabinet_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("cabinet", body_names="drawer_handle_top"), "static_friction_range": (1.0, 1.25), "dynamic_friction_range": (1.25, 1.5), "restitution_range": (0.0, 0.0), "num_buckets": 16, }, ) reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") reset_robot_joints = EventTerm( func=mdp.reset_joints_by_offset, mode="reset", params={ "position_range": (-0.1, 0.1), "velocity_range": (0.0, 0.0), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # 1. Approach the handle approach_ee_handle = RewTerm(func=mdp.approach_ee_handle, weight=2.0, params={"threshold": 0.2}) align_ee_handle = RewTerm(func=mdp.align_ee_handle, weight=0.5) # 2. Grasp the handle approach_gripper_handle = RewTerm(func=mdp.approach_gripper_handle, weight=5.0, params={"offset": MISSING}) align_grasp_around_handle = RewTerm(func=mdp.align_grasp_around_handle, weight=0.125) grasp_handle = RewTerm( func=mdp.grasp_handle, weight=0.5, params={ "threshold": 0.03, "open_joint_pos": MISSING, "asset_cfg": SceneEntityCfg("robot", joint_names=MISSING), }, ) # 3. 
Open the drawer open_drawer_bonus = RewTerm( func=mdp.open_drawer_bonus, weight=7.5, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) multi_stage_open_drawer = RewTerm( func=mdp.multi_stage_open_drawer, weight=1.0, params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, ) # 4. Penalize actions for cosmetic reasons action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-1e-2) joint_vel = RewTerm(func=mdp.joint_vel_l2, weight=-0.0001) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) ## # Environment configuration ## @configclass class CabinetEnvCfg(RLTaskEnvCfg): """Configuration for the cabinet environment.""" # Scene settings scene: CabinetSceneCfg = CabinetSceneCfg(num_envs=4096, env_spacing=2.0) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 1 self.episode_length_s = 8.0 self.viewer.eye = (-2.0, 2.0, 2.0) self.viewer.lookat = (0.8, 0.0, 0.5) # simulation settings self.sim.dt = 1 / 60 # 60Hz self.sim.physx.bounce_threshold_velocity = 0.2 self.sim.physx.bounce_threshold_velocity = 0.01 self.sim.physx.friction_correlation_distance = 0.00625
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments to open drawers in a cabinet."""
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the cabinet environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.utils.math import matrix_from_quat if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def approach_ee_handle(env: RLTaskEnv, threshold: float) -> torch.Tensor: r"""Reward the robot for reaching the drawer handle using inverse-square law. It uses a piecewise function to reward the robot for reaching the handle. .. math:: reward = \begin{cases} 2 * (1 / (1 + distance^2))^2 & \text{if } distance \leq threshold \\ (1 / (1 + distance^2))^2 & \text{otherwise} \end{cases} """ ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Compute the distance of the end-effector to the handle distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) # Reward the robot for reaching the handle reward = 1.0 / (1.0 + distance**2) reward = torch.pow(reward, 2) return torch.where(distance <= threshold, 2 * reward, reward) def align_ee_handle(env: RLTaskEnv) -> torch.Tensor: """Reward for aligning the end-effector with the handle. The reward is based on the alignment of the gripper with the handle. It is computed as follows: .. math:: reward = 0.5 * (align_z^2 + align_x^2) where :math:`align_z` is the dot product of the z direction of the gripper and the -x direction of the handle and :math:`align_x` is the dot product of the x direction of the gripper and the -y direction of the handle. """ ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w[..., 0, :] handle_quat = env.scene["cabinet_frame"].data.target_quat_w[..., 0, :] ee_tcp_rot_mat = matrix_from_quat(ee_tcp_quat) handle_mat = matrix_from_quat(handle_quat) # get current x and y direction of the handle handle_x, handle_y = handle_mat[..., 0], handle_mat[..., 1] # get current x and z direction of the gripper ee_tcp_x, ee_tcp_z = ee_tcp_rot_mat[..., 0], ee_tcp_rot_mat[..., 2] # make sure gripper aligns with the handle # in this case, the z direction of the gripper should be close to the -x direction of the handle # and the x direction of the gripper should be close to the -y direction of the handle # dot product of z and x should be large align_z = torch.bmm(ee_tcp_z.unsqueeze(1), -handle_x.unsqueeze(-1)).squeeze(-1).squeeze(-1) align_x = torch.bmm(ee_tcp_x.unsqueeze(1), -handle_y.unsqueeze(-1)).squeeze(-1).squeeze(-1) return 0.5 * (torch.sign(align_z) * align_z**2 + torch.sign(align_x) * align_x**2) def align_grasp_around_handle(env: RLTaskEnv) -> torch.Tensor: """Bonus for correct hand orientation around the handle. The correct hand orientation is when the left finger is above the handle and the right finger is below the handle. 
""" # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) # bonus if left finger is above the drawer handle and right below return is_graspable def approach_gripper_handle(env: RLTaskEnv, offset: float = 0.04) -> torch.Tensor: """Reward the robot's gripper reaching the drawer handle with the right pose. This function returns the distance of fingertips to the handle when the fingers are in a grasping orientation (i.e., the left finger is above the handle and the right finger is below the handle). Otherwise, it returns zero. """ # Target object position: (num_envs, 3) handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] # Compute the distance of each finger from the handle lfinger_dist = torch.abs(lfinger_pos[:, 2] - handle_pos[:, 2]) rfinger_dist = torch.abs(rfinger_pos[:, 2] - handle_pos[:, 2]) # Check if hand is in a graspable pose is_graspable = (rfinger_pos[:, 2] < handle_pos[:, 2]) & (lfinger_pos[:, 2] > handle_pos[:, 2]) return is_graspable * ((offset - lfinger_dist) + (offset - rfinger_dist)) def grasp_handle(env: RLTaskEnv, threshold: float, open_joint_pos: float, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Reward for closing the fingers when being close to the handle. The :attr:`threshold` is the distance from the handle at which the fingers should be closed. The :attr:`open_joint_pos` is the joint position when the fingers are open. Note: It is assumed that zero joint position corresponds to the fingers being closed. """ ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids] distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) is_close = distance <= threshold return is_close * torch.sum(open_joint_pos - gripper_joint_pos, dim=-1) def open_drawer_bonus(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Bonus for opening the drawer given by the joint position of the drawer. The bonus is given when the drawer is open. If the grasp is around the handle, the bonus is doubled. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() return (is_graspable + 1.0) * drawer_pos def multi_stage_open_drawer(env: RLTaskEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Multi-stage bonus for opening the drawer. Depending on the drawer's position, the reward is given in three stages: easy, medium, and hard. This helps the agent to learn to open the drawer in a controlled manner. """ drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() open_easy = (drawer_pos > 0.01) * 0.5 open_medium = (drawer_pos > 0.2) * is_graspable open_hard = (drawer_pos > 0.3) * is_graspable return open_easy + open_medium + open_hard
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import ArticulationData from omni.isaac.orbit.sensors import FrameTransformerData if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def rel_ee_object_distance(env: RLTaskEnv) -> torch.Tensor: """The distance between the end-effector and the object.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data object_data: ArticulationData = env.scene["object"].data return object_data.root_pos_w - ee_tf_data.target_pos_w[..., 0, :] def rel_ee_drawer_distance(env: RLTaskEnv) -> torch.Tensor: """The distance between the end-effector and the drawer handle.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data cabinet_tf_data: FrameTransformerData = env.scene["cabinet_frame"].data return cabinet_tf_data.target_pos_w[..., 0, :] - ee_tf_data.target_pos_w[..., 0, :] def fingertips_pos(env: RLTaskEnv) -> torch.Tensor: """The position of the fingertips relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data fingertips_pos = ee_tf_data.target_pos_w[..., 1:, :] - env.scene.env_origins.unsqueeze(1) return fingertips_pos.view(env.num_envs, -1) def ee_pos(env: RLTaskEnv) -> torch.Tensor: """The position of the end-effector relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data ee_pos = ee_tf_data.target_pos_w[..., 0, :] - env.scene.env_origins return ee_pos def ee_quat(env: RLTaskEnv) -> torch.Tensor: """The orientation of the end-effector in the environment frame.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data ee_quat = ee_tf_data.target_quat_w[..., 0, :] # make first element of quaternion positive ee_quat[ee_quat[:, 0] < 0] *= -1 return ee_quat
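The sign flip in ee_quat() exists because q and -q encode the same rotation; forcing a non-negative w component keeps the observation continuous for the policy. A minimal torch sketch of that canonicalization:

import torch

# two quaternions (w, x, y, z) encoding the same 90-degree rotation about z, with opposite signs
ee_quat = torch.tensor([
    [0.7071, 0.0, 0.0, 0.7071],
    [-0.7071, 0.0, 0.0, -0.7071],
])
# make the first (w) component non-negative, as done in ee_quat() above
ee_quat[ee_quat[:, 0] < 0] *= -1
print(ee_quat)   # both rows are now [0.7071, 0, 0, 0.7071]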
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the cabinet environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(joint_pos_env_cfg.FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Open-Drawer-Franka-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Abs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-IK-Abs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Open-Drawer-Franka-IK-Rel-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Open-Drawer-Franka-IK-Rel-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg, "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, disable_env_checker=True, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.sensors import FrameTransformerCfg from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.manipulation.cabinet import mdp from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG # isort: skip from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import FRAME_MARKER_SMALL_CFG # isort: skip @configclass class FrankaCabinetEnvCfg(CabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set franka as robot self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set Actions for the specific robot type (franka) self.actions.body_joint_pos = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=1.0, use_default_offset=True, ) self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg( asset_name="robot", joint_names=["panda_finger.*"], open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) # Listens to the required transforms # IMPORTANT: The order of the frames in the list is important. The first frame is the tool center point (TCP) # the other frames are the fingers self.scene.ee_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_link0", debug_vis=False, visualizer_cfg=FRAME_MARKER_SMALL_CFG.replace(prim_path="/Visuals/EndEffectorFrameTransformer"), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_hand", name="ee_tcp", offset=OffsetCfg( pos=(0.0, 0.0, 0.1034), ), ), FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger", name="tool_leftfinger", offset=OffsetCfg( pos=(0.0, 0.0, 0.046), ), ), FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger", name="tool_rightfinger", offset=OffsetCfg( pos=(0.0, 0.0, 0.046), ), ), ], ) # override rewards self.rewards.approach_gripper_handle.params["offset"] = 0.04 self.rewards.grasp_handle.params["open_joint_pos"] = 0.04 self.rewards.grasp_handle.params["asset_cfg"].joint_names = ["panda_finger_.*"] @configclass class FrankaCabinetEnvCfg_PLAY(FrankaCabinetEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
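The gripper in this config is driven by a binary action: the commanded value selects between the open and close joint-position expressions set above (0.04 versus 0.0 for the panda fingers). The sketch below illustrates such a mapping with torch only; the sign convention and thresholding rule here are illustrative assumptions, not the exact behavior of BinaryJointPositionActionCfg.

import torch

OPEN_POS = 0.04    # panda_finger_.* target when the gripper opens (from open_command_expr above)
CLOSE_POS = 0.0    # target when it closes (from close_command_expr above)

def gripper_targets(binary_action: torch.Tensor) -> torch.Tensor:
    """Map a scalar action per env to finger joint targets: non-negative -> open, negative -> close (assumed rule)."""
    open_mask = (binary_action >= 0.0).unsqueeze(-1)                              # (num_envs, 1)
    targets = torch.where(open_mask, torch.tensor(OPEN_POS), torch.tensor(CLOSE_POS))
    return targets.repeat(1, 2)                                                   # two finger joints share one target

actions = torch.tensor([0.7, -0.3, 0.0])
print(gripper_targets(actions))   # rows: open, close, open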
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class CabinetPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 96 max_iterations = 400 save_interval = 50 experiment_name = "franka_open_drawer" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[256, 128, 64], critic_hidden_dims=[256, 128, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=1e-3, num_learning_epochs=5, num_mini_batches=4, learning_rate=5.0e-4, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.02, max_grad_norm=1.0, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/cabinet/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 5.0 clip_actions: 1.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False load_path: '' config: name: franka_open_drawer env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: False normalize_value: False num_actors: -1 # configured from the script (based on num_envs) reward_shaper: scale_value: 1 normalize_advantage: False gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 200 max_epochs: 400 save_best_after: 50 save_frequency: 50 print_stats: True grad_norm: 1.0 entropy_coef: 0.001 truncate_grads: True e_clip: 0.2 horizon_length: 96 minibatch_size: 4096 mini_epochs: 5 critic_coef: 4 clip_value: True seq_length: 4 bounds_loss_coef: 0.0001
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the object lift environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/lift_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from . import mdp ## # Scene definition ## @configclass class ObjectTableSceneCfg(InteractiveSceneCfg): """Configuration for the lift scene with a robot and a object. This is the abstract base implementation, the exact scene is defined in the derived classes which need to set the target object, robot and end-effector frames """ # robots: will be populated by agent env cfg robot: ArticulationCfg = MISSING # end-effector sensor: will be populated by agent env cfg ee_frame: FrameTransformerCfg = MISSING # target object: will be populated by agent env cfg object: RigidObjectCfg = MISSING # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(pos=[0, 0, -1.05]), spawn=GroundPlaneCfg(), ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command terms for the MDP.""" object_pose = mdp.UniformPoseCommandCfg( asset_name="robot", body_name=MISSING, # will be set by agent env cfg resampling_time_range=(5.0, 5.0), debug_vis=True, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.4, 0.6), pos_y=(-0.25, 0.25), pos_z=(0.25, 0.5), roll=(0.0, 0.0), pitch=(0.0, 0.0), yaw=(0.0, 0.0) ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" # will be set by agent env cfg body_joint_pos: mdp.JointPositionActionCfg = MISSING finger_joint_pos: mdp.BinaryJointPositionActionCfg = MISSING @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" joint_pos = ObsTerm(func=mdp.joint_pos_rel) joint_vel = ObsTerm(func=mdp.joint_vel_rel) object_position = ObsTerm(func=mdp.object_position_in_robot_root_frame) target_object_position = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"}) actions = ObsTerm(func=mdp.last_action) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: 
"""Configuration for events.""" reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") reset_object_position = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": (-0.1, 0.1), "y": (-0.25, 0.25), "z": (0.0, 0.0)}, "velocity_range": {}, "asset_cfg": SceneEntityCfg("object", body_names="Object"), }, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" reaching_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.1}, weight=1.0) lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.06}, weight=15.0) object_goal_tracking = RewTerm( func=mdp.object_goal_distance, params={"std": 0.3, "minimal_height": 0.06, "command_name": "object_pose"}, weight=16.0, ) object_goal_tracking_fine_grained = RewTerm( func=mdp.object_goal_distance, params={"std": 0.05, "minimal_height": 0.06, "command_name": "object_pose"}, weight=5.0, ) # action penalty action_rate = RewTerm(func=mdp.action_rate_l2, weight=-1e-3) joint_vel = RewTerm( func=mdp.joint_vel_l2, weight=-1e-4, params={"asset_cfg": SceneEntityCfg("robot")}, ) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) object_dropping = DoneTerm( func=mdp.base_height, params={"minimum_height": -0.05, "asset_cfg": SceneEntityCfg("object")} ) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" action_rate = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000} ) joint_vel = CurrTerm( func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000} ) ## # Environment configuration ## @configclass class LiftEnvCfg(RLTaskEnvCfg): """Configuration for the lifting environment.""" # Scene settings scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 self.episode_length_s = 5.0 # simulation settings self.sim.dt = 0.01 # 100Hz self.sim.physx.bounce_threshold_velocity = 0.2 self.sim.physx.bounce_threshold_velocity = 0.01 self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 self.sim.physx.friction_correlation_distance = 0.00625
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the lift environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .observations import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403 from .terminations import * # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import FrameTransformer
from omni.isaac.orbit.utils.math import combine_frame_transforms

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def object_is_lifted(
    env: RLTaskEnv, minimal_height: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Reward the agent for lifting the object above the minimal height."""
    object: RigidObject = env.scene[object_cfg.name]
    return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0)


def object_ee_distance(
    env: RLTaskEnv,
    std: float,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
    """Reward the agent for reaching the object using a tanh kernel."""
    # extract the used quantities (to enable type-hinting)
    object: RigidObject = env.scene[object_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # Target object position: (num_envs, 3)
    cube_pos_w = object.data.root_pos_w
    # End-effector position: (num_envs, 3)
    ee_w = ee_frame.data.target_pos_w[..., 0, :]
    # Distance of the end-effector to the object: (num_envs,)
    object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1)
    return 1 - torch.tanh(object_ee_distance / std)


def object_goal_distance(
    env: RLTaskEnv,
    std: float,
    minimal_height: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward the agent for tracking the goal pose using a tanh kernel."""
    # extract the used quantities (to enable type-hinting)
    robot: RigidObject = env.scene[robot_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    command = env.command_manager.get_command(command_name)
    # compute the desired position in the world frame
    des_pos_b = command[:, :3]
    des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the desired position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # reward tracking only when the object is lifted above the minimal height
    return (object.data.root_pos_w[:, 2] > minimal_height) * (1 - torch.tanh(distance / std))
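Both distance rewards above share the same saturating kernel, 1 - tanh(d / std): it is close to 1 when the distance is much smaller than std and decays towards 0 once the distance exceeds a few multiples of std. A small numeric illustration:

# Shape of the tanh reaching kernel used by object_ee_distance / object_goal_distance.
import torch

std = 0.1  # same value as the reaching_object reward term in LiftEnvCfg
for d in (0.0, 0.05, 0.1, 0.3):
    r = 1 - torch.tanh(torch.tensor(d) / std)
    print(f"distance {d:.2f} m -> reward {r.item():.3f}")
# distance 0.00 m -> reward 1.000
# distance 0.05 m -> reward 0.538
# distance 0.10 m -> reward 0.238
# distance 0.30 m -> reward 0.005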
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/terminations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Common functions that can be used to activate certain terminations for the lift task.

The functions can be passed to the :class:`omni.isaac.orbit.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from omni.isaac.orbit.assets import RigidObject
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils.math import combine_frame_transforms

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def object_reached_goal(
    env: RLTaskEnv,
    command_name: str = "object_pose",
    threshold: float = 0.02,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Termination condition for the object reaching the goal position.

    Args:
        env: The environment.
        command_name: The name of the command that is used to control the object.
        threshold: The threshold for the object to reach the goal position. Defaults to 0.02.
        robot_cfg: The robot configuration. Defaults to SceneEntityCfg("robot").
        object_cfg: The object configuration. Defaults to SceneEntityCfg("object").
    """
    # extract the used quantities (to enable type-hinting)
    robot: RigidObject = env.scene[robot_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    command = env.command_manager.get_command(command_name)
    # compute the desired position in the world frame
    des_pos_b = command[:, :3]
    des_pos_w, _ = combine_frame_transforms(robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], des_pos_b)
    # distance of the object to the desired position: (num_envs,)
    distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1)
    # terminate when the object is within the threshold distance of the goal
    return distance < threshold
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/mdp/observations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.assets import RigidObject from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.utils.math import subtract_frame_transforms if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def object_position_in_robot_root_frame( env: RLTaskEnv, robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), object_cfg: SceneEntityCfg = SceneEntityCfg("object"), ) -> torch.Tensor: """The position of the object in the robot's root frame.""" robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] object_pos_w = object.data.root_pos_w[:, :3] object_pos_b, _ = subtract_frame_transforms( robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], object_pos_w ) return object_pos_b
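subtract_frame_transforms expresses the object's world position in the robot's root frame. Below is a minimal numeric sketch of that transformation, assuming the pose-of-frame-2-in-frame-1 semantics used by the observation above and an identity root orientation (quaternion in (w, x, y, z) order); the numbers are illustrative only.

# Hypothetical numeric check of object_position_in_robot_root_frame's math.
import torch

from omni.isaac.orbit.utils.math import subtract_frame_transforms

root_pos_w = torch.tensor([[1.0, 0.0, 0.0]])        # robot root at x = 1 m
root_quat_w = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity orientation (w, x, y, z)
object_pos_w = torch.tensor([[1.5, 0.0, 0.055]])    # cube 0.5 m in front of the root

object_pos_b, _ = subtract_frame_transforms(root_pos_w, root_quat_w, object_pos_w)
print(object_pos_b)  # expected: [[0.5, 0.0, 0.055]]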
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for the object lift environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_rel_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/ik_abs_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.controllers.differential_ik_cfg import DifferentialIKControllerCfg from omni.isaac.orbit.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from omni.isaac.orbit.utils import configclass from . import joint_pos_env_cfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(joint_pos_env_cfg.FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = DifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["panda_joint.*"], body_name="panda_hand", controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym import os from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg ## # Register Gym environments. ## ## # Joint Position Control ## gym.register( id="Isaac-Lift-Cube-Franka-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Lift-Cube-Franka-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": joint_pos_env_cfg.FrankaCubeLiftEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Absolute Pose Control ## gym.register( id="Isaac-Lift-Cube-Franka-IK-Abs-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) gym.register( id="Isaac-Lift-Cube-Franka-IK-Abs-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_abs_env_cfg.FrankaCubeLiftEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, ) ## # Inverse Kinematics - Relative Pose Control ## gym.register( id="Isaac-Lift-Cube-Franka-IK-Rel-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc.json"), }, disable_env_checker=True, ) gym.register( id="Isaac-Lift-Cube-Franka-IK-Rel-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", kwargs={ "env_cfg_entry_point": ik_rel_env_cfg.FrankaCubeLiftEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.LiftCubePPORunnerCfg, "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", }, disable_env_checker=True, )
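Because each task above is registered with an env_cfg_entry_point, the usual way to build one is through gymnasium. A rough launch sketch follows; it assumes the parse_env_cfg helper from omni.isaac.orbit_tasks.utils and an already-running simulation app, both conventions of the surrounding framework rather than anything defined in this file.

# Hypothetical launch sketch for one of the registered lift tasks.
import gymnasium as gym
import torch

from omni.isaac.orbit_tasks.utils import parse_env_cfg  # assumed helper that resolves env_cfg_entry_point

env_cfg = parse_env_cfg("Isaac-Lift-Cube-Franka-v0", num_envs=64)
env = gym.make("Isaac-Lift-Cube-Franka-v0", cfg=env_cfg)

obs, _ = env.reset()
for _ in range(10):
    # random joint-position targets and gripper commands in [-1, 1]
    actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
    obs, rew, terminated, truncated, info = env.step(actions)
env.close()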
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/joint_pos_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.assets import RigidObjectCfg from omni.isaac.orbit.sensors import FrameTransformerCfg from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import UsdFileCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.orbit_tasks.manipulation.lift import mdp from omni.isaac.orbit_tasks.manipulation.lift.lift_env_cfg import LiftEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG # isort: skip from omni.isaac.orbit_assets.franka import FRANKA_PANDA_CFG # isort: skip @configclass class FrankaCubeLiftEnvCfg(LiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # Set Franka as robot self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # Set actions for the specific robot type (franka) self.actions.body_joint_pos = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True ) self.actions.finger_joint_pos = mdp.BinaryJointPositionActionCfg( asset_name="robot", joint_names=["panda_finger.*"], open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) # Set the body name for the end effector self.commands.object_pose.body_name = "panda_hand" # Set Cube as object self.scene.object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[1, 0, 0, 0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", scale=(0.8, 0.8, 0.8), rigid_props=RigidBodyPropertiesCfg( solver_position_iteration_count=16, solver_velocity_iteration_count=1, max_angular_velocity=1000.0, max_linear_velocity=1000.0, max_depenetration_velocity=5.0, disable_gravity=False, ), ), ) # Listens to the required transforms marker_cfg = FRAME_MARKER_CFG.copy() marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) marker_cfg.prim_path = "/Visuals/FrameTransformer" self.scene.ee_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_link0", debug_vis=False, visualizer_cfg=marker_cfg, target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/panda_hand", name="end_effector", offset=OffsetCfg( pos=[0.0, 0.0, 0.1034], ), ), ], ) @configclass class FrankaCubeLiftEnvCfg_PLAY(FrankaCubeLiftEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class LiftCubePPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "franka_lift" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[256, 128, 64], critic_hidden_dims=[256, 128, 64], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.006, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-4, schedule="adaptive", gamma=0.98, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, )
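LiftCubePPORunnerCfg mirrors the dictionary layout consumed by rsl-rl's OnPolicyRunner. The sketch below shows how such a config is typically used; the RslRlVecEnvWrapper, the OnPolicyRunner(env, cfg_dict, log_dir, device) signature, and the cfg.device field are assumptions drawn from the usual training workflow, not from this file.

# Hypothetical training-loop sketch built around LiftCubePPORunnerCfg.
from rsl_rl.runners import OnPolicyRunner

from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper

agent_cfg = LiftCubePPORunnerCfg()
agent_cfg.max_iterations = 100   # shorten for a quick smoke test

# `env` is an RLTaskEnv created elsewhere (e.g. via gym.make as in the registration file above)
runner = OnPolicyRunner(
    RslRlVecEnvWrapper(env), agent_cfg.to_dict(), log_dir="logs/franka_lift", device=agent_cfg.device
)
runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)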
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html models: separate: True policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "" output_scale: 1.0 value: # see skrl.utils.model_instantiators.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [256, 128, 64] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent: rollouts: 16 learning_epochs: 8 mini_batches: 8 discount_factor: 0.99 lambda: 0.95 learning_rate: 3.e-4 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.008 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 2.0 kl_threshold: 0 rewards_shaper_scale: 0.01 # logging and checkpoint experiment: directory: "franka_lift" experiment_name: "" write_interval: 120 checkpoint_interval: 1200 # Sequential trainer # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html trainer: timesteps: 24000
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml
# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32
seed: 42

# epochs * n_steps * n_envs: 500 * 512 * 8 * 8
n_timesteps: 16384000
policy: 'MlpPolicy'
n_steps: 64
# mini batch size: num_envs * n_steps / n_minibatches
batch_size: 192
gae_lambda: 0.95
gamma: 0.99
n_epochs: 8
ent_coef: 0.00
vf_coef: 0.0001
learning_rate: !!float 3e-4
clip_range: 0.2
policy_kwargs: "dict( activation_fn=nn.ELU, net_arch=[32, 32, dict(pi=[256, 128, 64], vf=[256, 128, 64])] )"
target_kl: 0.01
max_grad_norm: 1.0

# # Uses VecNormalize class to normalize obs
# normalize_input: True

# # Uses VecNormalize class to normalize rew
# normalize_value: True
# clip_obs: 5
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/rl_games_ppo_cfg.yaml
params: seed: 42 # environment wrapper clipping env: clip_observations: 100.0 clip_actions: 100.0 algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: False # flag which sets whether to load the checkpoint load_path: '' # path to the checkpoint to load config: name: reach env_name: rlgpu device: 'cuda:0' device_name: 'cuda:0' multi_gpu: False ppo: True mixed_precision: False normalize_input: True normalize_value: True value_bootstrap: False num_actors: -1 reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 3e-4 lr_schedule: adaptive schedule_type: legacy kl_threshold: 0.008 score_to_win: 100000000 max_epochs: 500 save_best_after: 100 save_frequency: 50 print_stats: True grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 16 minibatch_size: 4096 #2048 mini_epochs: 8 critic_coef: 4 clip_value: True seq_len: 4 bounds_loss_coef: 0.0001
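rl_games splits each rollout of horizon_length steps across all actors into minibatches, so horizon_length * num_actors must be divisible by minibatch_size; num_actors is -1 here because it is filled in from the launch script based on num_envs. A quick sanity check of that constraint, assuming the task is launched with 2048 environments (an illustrative value, not one set in this file):

# Hypothetical check of the rl_games minibatch constraint for this config.
horizon_length = 16
minibatch_size = 4096
num_actors = 2048            # assumed value passed from the launch script (num_envs)

batch_size = horizon_length * num_actors     # 32768 transitions per update
assert batch_size % minibatch_size == 0, "rl_games requires an integer number of minibatches"
print(batch_size // minibatch_size)          # 8 minibatches per learning epoch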
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/robomimic/bcq.json
{ "algo_name": "bcq", "experiment": { "name": "bcq", "validate": true, "logging": { "terminal_output_to_txt": true, "log_tb": true }, "save": { "enabled": true, "every_n_seconds": null, "every_n_epochs": 50, "epochs": [], "on_best_validation": true, "on_best_rollout_return": false, "on_best_rollout_success_rate": false }, "epoch_every_n_steps": 100, "validation_epoch_every_n_steps": 10, "env": null, "additional_envs": null, "render": false, "render_video": true, "keep_all_videos": false, "video_skip": 5, "rollout": { "enabled": false, "n": 50, "horizon": 400, "rate": 50, "warmstart": 0, "terminate_on_success": true } }, "train": { "data": null, "output_dir": "../bcq_trained_models", "num_data_workers": 0, "hdf5_cache_mode": "all", "hdf5_use_swmr": true, "hdf5_normalize_obs": false, "hdf5_filter_key": null, "seq_length": 1, "dataset_keys": [ "actions", "rewards", "dones" ], "goal_mode": null, "cuda": true, "batch_size": 100, "num_epochs": 200, "seed": 1 }, "algo": { "optim_params": { "critic": { "learning_rate": { "initial": 0.001, "decay_factor": 0.1, "epoch_schedule": [] }, "regularization": { "L2": 0.0 }, "start_epoch": -1, "end_epoch": -1 }, "action_sampler": { "learning_rate": { "initial": 0.001, "decay_factor": 0.1, "epoch_schedule": [] }, "regularization": { "L2": 0.0 }, "start_epoch": -1, "end_epoch": -1 }, "actor": { "learning_rate": { "initial": 0.001, "decay_factor": 0.1, "epoch_schedule": [] }, "regularization": { "L2": 0.0 }, "start_epoch": -1, "end_epoch": -1 } }, "discount": 0.99, "n_step": 1, "target_tau": 0.005, "infinite_horizon": false, "critic": { "use_huber": false, "max_gradient_norm": null, "value_bounds": null, "num_action_samples": 10, "num_action_samples_rollout": 100, "ensemble": { "n": 2, "weight": 0.75 }, "distributional": { "enabled": false, "num_atoms": 51 }, "layer_dims": [ 300, 400 ] }, "action_sampler": { "actor_layer_dims": [ 1024, 1024 ], "gmm": { "enabled": false, "num_modes": 5, "min_std": 0.0001, "std_activation": "softplus", "low_noise_eval": true }, "vae": { "enabled": true, "latent_dim": 14, "latent_clip": null, "kl_weight": 1.0, "decoder": { "is_conditioned": true, "reconstruction_sum_across_elements": false }, "prior": { "learn": false, "is_conditioned": false, "use_gmm": false, "gmm_num_modes": 10, "gmm_learn_weights": false, "use_categorical": false, "categorical_dim": 10, "categorical_gumbel_softmax_hard": false, "categorical_init_temp": 1.0, "categorical_temp_anneal_step": 0.001, "categorical_min_temp": 0.3 }, "encoder_layer_dims": [ 300, 400 ], "decoder_layer_dims": [ 300, 400 ], "prior_layer_dims": [ 300, 400 ] }, "freeze_encoder_epoch": -1 }, "actor": { "enabled": false, "perturbation_scale": 0.05, "layer_dims": [ 300, 400 ] } }, "observation": { "modalities": { "obs": { "low_dim": [ "tool_dof_pos_scaled", "tool_positions", "object_relative_tool_positions", "object_desired_positions" ], "rgb": [], "depth": [], "scan": [] }, "goal": { "low_dim": [], "rgb": [], "depth": [], "scan": [] } }, "encoder": { "low_dim": { "core_class": null, "core_kwargs": {}, "obs_randomizer_class": null, "obs_randomizer_kwargs": {} }, "rgb": { "core_class": "VisualCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "backbone_class": "ResNet18Conv", "backbone_kwargs": { "pretrained": false, "input_coord_conv": false }, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { "crop_height": 
76, "crop_width": 76, "num_crops": 1, "pos_enc": false } }, "depth": { "core_class": "VisualCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "backbone_class": "ResNet18Conv", "backbone_kwargs": { "pretrained": false, "input_coord_conv": false }, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { "crop_height": 76, "crop_width": 76, "num_crops": 1, "pos_enc": false } }, "scan": { "core_class": "ScanCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false }, "conv_activation": "relu", "conv_kwargs": { "out_channels": [ 32, 64, 64 ], "kernel_size": [ 8, 4, 2 ], "stride": [ 4, 2, 1 ] } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { "crop_height": 76, "crop_width": 76, "num_crops": 1, "pos_enc": false } } } } }
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/lift/config/franka/agents/robomimic/bc.json
{ "algo_name": "bc", "experiment": { "name": "bc", "validate": true, "logging": { "terminal_output_to_txt": true, "log_tb": true }, "save": { "enabled": true, "every_n_seconds": null, "every_n_epochs": 50, "epochs": [], "on_best_validation": false, "on_best_rollout_return": false, "on_best_rollout_success_rate": true }, "epoch_every_n_steps": 100, "validation_epoch_every_n_steps": 10, "env": null, "additional_envs": null, "render": false, "render_video": true, "keep_all_videos": false, "video_skip": 5, "rollout": { "enabled": false, "n": 50, "horizon": 400, "rate": 50, "warmstart": 0, "terminate_on_success": true } }, "train": { "data": null, "output_dir": "../bc_trained_models", "num_data_workers": 0, "hdf5_cache_mode": "all", "hdf5_use_swmr": true, "hdf5_normalize_obs": false, "hdf5_filter_key": "train", "hdf5_validation_filter_key": "valid", "seq_length": 1, "dataset_keys": [ "actions", "rewards", "dones" ], "goal_mode": null, "cuda": true, "batch_size": 100, "num_epochs": 200, "seed": 1 }, "algo": { "optim_params": { "policy": { "learning_rate": { "initial": 0.0001, "decay_factor": 0.1, "epoch_schedule": [] }, "regularization": { "L2": 0.0 } } }, "loss": { "l2_weight": 1.0, "l1_weight": 0.0, "cos_weight": 0.0 }, "actor_layer_dims": [ 1024, 1024 ], "gaussian": { "enabled": false, "fixed_std": false, "init_std": 0.1, "min_std": 0.01, "std_activation": "softplus", "low_noise_eval": true }, "gmm": { "enabled": false, "num_modes": 5, "min_std": 0.0001, "std_activation": "softplus", "low_noise_eval": true }, "vae": { "enabled": false, "latent_dim": 14, "latent_clip": null, "kl_weight": 1.0, "decoder": { "is_conditioned": true, "reconstruction_sum_across_elements": false }, "prior": { "learn": false, "is_conditioned": false, "use_gmm": false, "gmm_num_modes": 10, "gmm_learn_weights": false, "use_categorical": false, "categorical_dim": 10, "categorical_gumbel_softmax_hard": false, "categorical_init_temp": 1.0, "categorical_temp_anneal_step": 0.001, "categorical_min_temp": 0.3 }, "encoder_layer_dims": [ 300, 400 ], "decoder_layer_dims": [ 300, 400 ], "prior_layer_dims": [ 300, 400 ] }, "rnn": { "enabled": false, "horizon": 10, "hidden_dim": 400, "rnn_type": "LSTM", "num_layers": 2, "open_loop": false, "kwargs": { "bidirectional": false } } }, "observation": { "modalities": { "obs": { "low_dim": [ "joint_pos", "joint_vel", "object_position", "target_object_position" ], "rgb": [], "depth": [], "scan": [] }, "goal": { "low_dim": [], "rgb": [], "depth": [], "scan": [] } }, "encoder": { "low_dim": { "core_class": null, "core_kwargs": {}, "obs_randomizer_class": null, "obs_randomizer_kwargs": {} }, "rgb": { "core_class": "VisualCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "backbone_class": "ResNet18Conv", "backbone_kwargs": { "pretrained": false, "input_coord_conv": false }, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { "crop_height": 76, "crop_width": 76, "num_crops": 1, "pos_enc": false } }, "depth": { "core_class": "VisualCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "backbone_class": "ResNet18Conv", "backbone_kwargs": { "pretrained": false, "input_coord_conv": false }, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { 
"crop_height": 76, "crop_width": 76, "num_crops": 1, "pos_enc": false } }, "scan": { "core_class": "ScanCore", "core_kwargs": { "feature_dimension": 64, "flatten": true, "pool_class": "SpatialSoftmax", "pool_kwargs": { "num_kp": 32, "learnable_temperature": false, "temperature": 1.0, "noise_std": 0.0, "output_variance": false }, "conv_activation": "relu", "conv_kwargs": { "out_channels": [ 32, 64, 64 ], "kernel_size": [ 8, 4, 2 ], "stride": [ 4, 2, 1 ] } }, "obs_randomizer_class": null, "obs_randomizer_kwargs": { "crop_height": 76, "crop_width": 76, "num_crops": 1, "pos_enc": false } } } } }
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments for legged robots.""" from .velocity import * # noqa
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/velocity_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math from dataclasses import MISSING import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns from omni.isaac.orbit.terrains import TerrainImporterCfg from omni.isaac.orbit.utils import configclass from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp ## # Pre-defined configs ## from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip ## # Scene definition ## @configclass class MySceneCfg(InteractiveSceneCfg): """Configuration for the terrain scene with a legged robot.""" # ground terrain terrain = TerrainImporterCfg( prim_path="/World/ground", terrain_type="generator", terrain_generator=ROUGH_TERRAINS_CFG, max_init_terrain_level=5, collision_group=-1, physics_material=sim_utils.RigidBodyMaterialCfg( friction_combine_mode="multiply", restitution_combine_mode="multiply", static_friction=1.0, dynamic_friction=1.0, ), visual_material=sim_utils.MdlFileCfg( mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl", project_uvw=True, ), debug_vis=False, ) # robots robot: ArticulationCfg = MISSING # sensors height_scanner = RayCasterCfg( prim_path="{ENV_REGEX_NS}/Robot/base", offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)), attach_yaw_only=True, pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]), debug_vis=False, mesh_prim_paths=["/World/ground"], ) contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) sky_light = AssetBaseCfg( prim_path="/World/skyLight", spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0), ) ## # MDP settings ## @configclass class CommandsCfg: """Command specifications for the MDP.""" base_velocity = mdp.UniformVelocityCommandCfg( asset_name="robot", resampling_time_range=(10.0, 10.0), rel_standing_envs=0.02, rel_heading_envs=1.0, heading_command=True, heading_control_stiffness=0.5, debug_vis=True, ranges=mdp.UniformVelocityCommandCfg.Ranges( lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi) ), ) @configclass class ActionsCfg: """Action specifications for the MDP.""" joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True) @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # observation terms (order preserved) base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, 
n_max=0.1)) base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2)) projected_gravity = ObsTerm( func=mdp.projected_gravity, noise=Unoise(n_min=-0.05, n_max=0.05), ) velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"}) joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5)) actions = ObsTerm(func=mdp.last_action) height_scan = ObsTerm( func=mdp.height_scan, params={"sensor_cfg": SceneEntityCfg("height_scanner")}, noise=Unoise(n_min=-0.1, n_max=0.1), clip=(-1.0, 1.0), ) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" # startup physics_material = EventTerm( func=mdp.randomize_rigid_body_material, mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names=".*"), "static_friction_range": (0.8, 0.8), "dynamic_friction_range": (0.6, 0.6), "restitution_range": (0.0, 0.0), "num_buckets": 64, }, ) add_base_mass = EventTerm( func=mdp.add_body_mass, mode="startup", params={"asset_cfg": SceneEntityCfg("robot", body_names="base"), "mass_range": (-5.0, 5.0)}, ) # reset base_external_force_torque = EventTerm( func=mdp.apply_external_force_torque, mode="reset", params={ "asset_cfg": SceneEntityCfg("robot", body_names="base"), "force_range": (0.0, 0.0), "torque_range": (-0.0, 0.0), }, ) reset_base = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (-0.5, 0.5), "y": (-0.5, 0.5), "z": (-0.5, 0.5), "roll": (-0.5, 0.5), "pitch": (-0.5, 0.5), "yaw": (-0.5, 0.5), }, }, ) reset_robot_joints = EventTerm( func=mdp.reset_joints_by_scale, mode="reset", params={ "position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0), }, ) # interval push_robot = EventTerm( func=mdp.push_by_setting_velocity, mode="interval", interval_range_s=(10.0, 15.0), params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}}, ) @configclass class RewardsCfg: """Reward terms for the MDP.""" # -- task track_lin_vel_xy_exp = RewTerm( func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} ) track_ang_vel_z_exp = RewTerm( func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} ) # -- penalties lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0) ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05) dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5) dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7) action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01) feet_air_time = RewTerm( func=mdp.feet_air_time, weight=0.125, params={ "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"), "command_name": "base_velocity", "threshold": 0.5, }, ) undesired_contacts = RewTerm( func=mdp.undesired_contacts, weight=-1.0, params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0}, ) # -- optional penalties flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0) dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0) @configclass class TerminationsCfg: """Termination terms for the MDP.""" time_out = DoneTerm(func=mdp.time_out, time_out=True) base_contact = DoneTerm( 
func=mdp.illegal_contact, params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0}, ) @configclass class CurriculumCfg: """Curriculum terms for the MDP.""" terrain_levels = CurrTerm(func=mdp.terrain_levels_vel) ## # Environment configuration ## @configclass class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg): """Configuration for the locomotion velocity-tracking environment.""" # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() commands: CommandsCfg = CommandsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 4 self.episode_length_s = 20.0 # simulation settings self.sim.dt = 0.005 self.sim.disable_contact_processing = True self.sim.physics_material = self.scene.terrain.physics_material # update sensor update periods # we tick all the sensors based on the smallest update period (physics update period) if self.scene.height_scanner is not None: self.scene.height_scanner.update_period = self.decimation * self.sim.dt if self.scene.contact_forces is not None: self.scene.contact_forces.update_period = self.sim.dt # check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator # this generates terrains with increasing difficulty and is useful for training if getattr(self.curriculum, "terrain_levels", None) is not None: if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.curriculum = True else: if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.curriculum = False
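The timing set in __post_init__ follows the same pattern as the manipulation configs: with sim.dt = 0.005 and decimation = 4 the policy runs at 50 Hz, and the sensors are ticked at the periods assigned above. A small worked example:

# Worked example of the update rates set in LocomotionVelocityRoughEnvCfg.__post_init__.
sim_dt = 0.005       # physics step (200 Hz)
decimation = 4       # physics steps per policy step
episode_length_s = 20.0

control_dt = sim_dt * decimation                  # 0.02 s -> 50 Hz policy rate
policy_steps = episode_length_s / control_dt      # 1000 policy steps per episode
height_scanner_period = decimation * sim_dt       # 0.02 s, once per policy step
contact_forces_period = sim_dt                    # 0.005 s, every physics step
print(control_dt, policy_steps, height_scanner_period, contact_forces_period)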
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments with velocity-tracking commands. These environments are based on the `legged_gym` environments provided by Rudin et al. Reference: https://github.com/leggedrobotics/legged_gym """
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This sub-module contains the functions that are specific to the locomotion environments.""" from omni.isaac.orbit.envs.mdp import * # noqa: F401, F403 from .curriculums import * # noqa: F401, F403 from .rewards import * # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/curriculums.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Common functions that can be used to create curriculum for the learning environment. The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable the curriculum introduced by the function. """ from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.terrains import TerrainImporter if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def terrain_levels_vel( env: RLTaskEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: """Curriculum based on the distance the robot walked when commanded to move at a desired velocity. This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the difficulty when the robot walks less than half of the distance required by the commanded velocity. .. note:: It is only possible to use this term with the terrain type ``generator``. For further information on different terrain types, check the :class:`omni.isaac.orbit.terrains.TerrainImporter` class. Returns: The mean terrain level for the given environment ids. """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] terrain: TerrainImporter = env.scene.terrain command = env.command_manager.get_command("base_velocity") # compute the distance the robot walked distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) # robots that walked far enough progress to harder terrains move_up = distance > terrain.cfg.terrain_generator.size[0] / 2 # robots that walked less than half of their required distance go to simpler terrains move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 move_down *= ~move_up # update terrain levels terrain.update_env_origins(env_ids, move_up, move_down) # return the mean terrain level return torch.mean(terrain.terrain_levels.float())
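To make the promotion/demotion rule above concrete: with 20 s episodes and a commanded planar speed of 1.0 m/s a robot is expected to cover 20 m, it is demoted if it covers less than half that, and it is promoted once it walks further than half a terrain tile (4 m for an assumed 8 m tile width; the actual width comes from the terrain generator config). A small sketch of that logic on example numbers:

# Worked example of the terrain_levels_vel promotion/demotion thresholds (values assumed for illustration).
import torch

terrain_tile_size_x = 8.0                         # assumed size[0] of the terrain generator
max_episode_length_s = 20.0
commanded_speed = torch.tensor([1.0, 0.4, 0.1])   # example |v_xy| commands per env
walked_distance = torch.tensor([5.0, 3.0, 0.5])   # example distances from the env origin

move_up = walked_distance > terrain_tile_size_x / 2
move_down = walked_distance < commanded_speed * max_episode_length_s * 0.5
move_down *= ~move_up
print(move_up.tolist(), move_down.tolist())
# [True, False, False] [False, True, True]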
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.sensors import ContactSensor if TYPE_CHECKING: from omni.isaac.orbit.envs import RLTaskEnv def feet_air_time(env: RLTaskEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> torch.Tensor: """Reward long steps taken by the feet using L2-kernel. This function rewards the agent for taking steps that are longer than a threshold. This helps ensure that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of the time for which the feet are in the air. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. """ # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids] last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids] reward = torch.sum((last_air_time - threshold) * first_contact, dim=1) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor: """Reward long steps taken by the feet for bipeds. This function rewards the agent for taking steps up to a specified threshold and also keep one foot at a time in the air. If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero. """ contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids] contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids] in_contact = contact_time > 0.0 in_mode_time = torch.where(in_contact, contact_time, air_time) single_stance = torch.sum(in_contact.int(), dim=1) == 1 reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0] reward = torch.clamp(reward, max=threshold) # no reward for zero command reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward
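A concrete reading of feet_air_time: each foot contributes (last_air_time - threshold) on the step where it first touches down again, so swing phases longer than 0.5 s add reward and shorter ones subtract from it, and everything is zeroed when the commanded planar velocity is below 0.1 m/s. A small numeric sketch with illustrative values:

# Worked example of the feet_air_time reward for a single environment with four feet.
import torch

threshold = 0.5
last_air_time = torch.tensor([[0.7, 0.4, 0.6, 0.2]])   # seconds each foot spent in the air
first_contact = torch.tensor([[1.0, 1.0, 0.0, 0.0]])   # only the first two feet just touched down
command_xy_norm = torch.tensor([0.8])                  # commanded planar speed in m/s

reward = torch.sum((last_air_time - threshold) * first_contact, dim=1)
reward *= command_xy_norm > 0.1
print(reward)  # tensor([0.1000]) -> (0.7 - 0.5) + (0.4 - 0.5) = 0.1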
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configurations for velocity-based locomotion environments.""" # We leave this file empty since we don't want to expose any configs in this package directly. # We still need this file to import the "config" module in the parent package.
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.unitree import UNITREE_GO1_CFG # isort: skip @configclass class UnitreeGo1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() self.scene.robot = UNITREE_GO1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 # reduce action scale self.actions.joint_pos.scale = 0.25 # event self.events.push_robot = None self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0) self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) self.events.reset_base.params = { "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0), "roll": (0.0, 0.0), "pitch": (0.0, 0.0), "yaw": (0.0, 0.0), }, } # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" self.rewards.feet_air_time.weight = 0.01 self.rewards.undesired_contacts = None self.rewards.dof_torques_l2.weight = -0.0002 self.rewards.track_lin_vel_xy_exp.weight = 1.5 self.rewards.track_ang_vel_z_exp.weight = 0.75 self.rewards.dof_acc_l2.weight = -2.5e-7 # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk" @configclass class UnitreeGo1RoughEnvCfg_PLAY(UnitreeGo1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import UnitreeGo1RoughEnvCfg @configclass class UnitreeGo1FlatEnvCfg(UnitreeGo1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -2.5 self.rewards.feet_air_time.weight = 0.25 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class UnitreeGo1FlatEnvCfg_PLAY(UnitreeGo1FlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Unitree-Go1-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Unitree-Go1-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go1-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go1-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg, }, )
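As a usage sketch (not part of the source tree), one of the tasks registered above can be created through the gym registry once the simulation app has been launched and the omni.isaac.orbit_tasks extension has been imported; the task id and num_envs below are illustrative.

import gymnasium as gym

import omni.isaac.orbit_tasks  # noqa: F401  # triggers the gym.register calls above
from omni.isaac.orbit_tasks.utils import parse_env_cfg

# parse the registered configuration, overriding the number of environments
env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Unitree-Go1-v0", num_envs=16)
# create the environment from the registered entry point (RLTaskEnv)
env = gym.make("Isaac-Velocity-Flat-Unitree-Go1-v0", cfg=env_cfg)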
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UnitreeGo1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "unitree_go1_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class UnitreeGo1FlatPPORunnerCfg(UnitreeGo1RoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "unitree_go1_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go1/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.managers import RewardTermCfg as RewTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.utils import configclass import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.cassie import CASSIE_CFG # isort: skip @configclass class CassieRewardsCfg(RewardsCfg): termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0) feet_air_time = RewTerm( func=mdp.feet_air_time_positive_biped, weight=2.5, params={ "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*toe"), "command_name": "base_velocity", "threshold": 0.3, }, ) joint_deviation_hip = RewTerm( func=mdp.joint_deviation_l1, weight=-0.2, params={"asset_cfg": SceneEntityCfg("robot", joint_names=["hip_abduction_.*", "hip_rotation_.*"])}, ) joint_deviation_toes = RewTerm( func=mdp.joint_deviation_l1, weight=-0.2, params={"asset_cfg": SceneEntityCfg("robot", joint_names=["toe_joint_.*"])}, ) # penalize toe joint limits dof_pos_limits = RewTerm( func=mdp.joint_pos_limits, weight=-1.0, params={"asset_cfg": SceneEntityCfg("robot", joint_names="toe_joint_.*")}, ) @configclass class CassieRoughEnvCfg(LocomotionVelocityRoughEnvCfg): """Cassie rough environment configuration.""" rewards: CassieRewardsCfg = CassieRewardsCfg() def __post_init__(self): super().__post_init__() # scene self.scene.robot = CASSIE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/pelvis" # actions self.actions.joint_pos.scale = 0.5 # events self.events.push_robot = None self.events.add_base_mass = None self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*pelvis"] self.events.reset_base.params = { "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0), "roll": (0.0, 0.0), "pitch": (0.0, 0.0), "yaw": (0.0, 0.0), }, } # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = [".*pelvis"] # rewards self.rewards.undesired_contacts = None self.rewards.dof_torques_l2.weight = -5.0e-6 self.rewards.track_lin_vel_xy_exp.weight = 2.0 self.rewards.track_ang_vel_z_exp.weight = 1.0 self.rewards.action_rate_l2.weight *= 1.5 self.rewards.dof_acc_l2.weight *= 1.5 @configclass class CassieRoughEnvCfg_PLAY(CassieRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False self.commands.base_velocity.ranges.lin_vel_x = (0.7, 1.0) self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0) self.commands.base_velocity.ranges.heading = (0.0, 0.0) # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import CassieRoughEnvCfg @configclass class CassieFlatEnvCfg(CassieRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # rewards self.rewards.flat_orientation_l2.weight = -2.5 self.rewards.feet_air_time.weight = 5.0 self.rewards.joint_deviation_hip.params["asset_cfg"].joint_names = ["hip_rotation_.*"] # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class CassieFlatEnvCfg_PLAY(CassieFlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Cassie-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Cassie-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Cassie-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Cassie-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg, }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class CassieRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "cassie_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class CassieFlatPPORunnerCfg(CassieRoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 1000 self.experiment_name = "cassie_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/cassie/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import ANYMAL_B_CFG  # isort: skip


@configclass
class AnymalBRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch robot to anymal-b
        self.scene.robot = ANYMAL_B_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


@configclass
class AnymalBRoughEnvCfg_PLAY(AnymalBRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False

        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing event
        self.events.base_external_force_torque = None
        self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import AnymalBRoughEnvCfg @configclass class AnymalBFlatEnvCfg(AnymalBRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -5.0 self.rewards.dof_torques_l2.weight = -2.5e-5 self.rewards.feet_air_time.weight = 0.5 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class AnymalBFlatEnvCfg_PLAY(AnymalBFlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Anymal-B-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Anymal-B-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalBFlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Anymal-B-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Anymal-B-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalBRoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalBRoughPPORunnerCfg, }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class AnymalBRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "anymal_b_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.005, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class AnymalBFlatPPORunnerCfg(AnymalBRoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "anymal_b_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_b/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort: skip @configclass class AnymalCRoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to anymal-c self.scene.robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") @configclass class AnymalCRoughEnvCfg_PLAY(AnymalCRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import AnymalCRoughEnvCfg @configclass class AnymalCFlatEnvCfg(AnymalCRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -5.0 self.rewards.dof_torques_l2.weight = -2.5e-5 self.rewards.feet_air_time.weight = 0.5 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class AnymalCFlatEnvCfg_PLAY(AnymalCFlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, flat_env_cfg, rough_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Velocity-Flat-Anymal-C-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
        "skrl_cfg_entry_point": "omni.isaac.orbit_tasks.locomotion.velocity.config.anymal_c.agents:skrl_cfg.yaml",
    },
)

gym.register(
    id="Isaac-Velocity-Flat-Anymal-C-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalCFlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCFlatPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-C-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-C-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalCRoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalCRoughPPORunnerCfg,
    },
)
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/skrl_cfg.yaml
seed: 42 # Models are instantiated using skrl's model instantiator utility # https://skrl.readthedocs.io/en/develop/modules/skrl.utils.model_instantiators.html models: separate: False policy: # see skrl.utils.model_instantiators.gaussian_model for parameter details clip_actions: False clip_log_std: True initial_log_std: 0 min_log_std: -20.0 max_log_std: 2.0 input_shape: "Shape.STATES" hiddens: [128, 128, 128] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ACTIONS" output_activation: "tanh" output_scale: 1.0 value: # see skrl.utils.model_instantiators.deterministic_model for parameter details clip_actions: False input_shape: "Shape.STATES" hiddens: [128, 128, 128] hidden_activation: ["elu", "elu", "elu"] output_shape: "Shape.ONE" output_activation: "" output_scale: 1.0 # PPO agent configuration (field names are from PPO_DEFAULT_CONFIG) # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent: rollouts: 24 learning_epochs: 5 mini_batches: 4 discount_factor: 0.99 lambda: 0.95 learning_rate: 1.e-3 learning_rate_scheduler: "KLAdaptiveLR" learning_rate_scheduler_kwargs: kl_threshold: 0.01 state_preprocessor: "RunningStandardScaler" state_preprocessor_kwargs: null value_preprocessor: "RunningStandardScaler" value_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 1.0 ratio_clip: 0.2 value_clip: 0.2 clip_predicted_values: True entropy_loss_scale: 0.0 value_loss_scale: 1.0 kl_threshold: 0 rewards_shaper_scale: 1.0 # logging and checkpoint experiment: directory: "anymal" experiment_name: "" write_interval: 60 checkpoint_interval: 600 # Sequential trainer # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.sequential.html trainer: timesteps: 12000
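As a hedged sketch, a YAML configuration like the one above is typically resolved through the task's "skrl_cfg_entry_point" and its string fields converted into skrl objects with process_skrl_cfg (defined in the wrapper module later in this extension); the task id below is illustrative.

from omni.isaac.orbit_tasks.utils import load_cfg_from_registry
from omni.isaac.orbit_tasks.utils.wrappers.skrl import process_skrl_cfg

# load the YAML through the gym registry and convert entries such as
# "KLAdaptiveLR" or "Shape.STATES" from strings into skrl classes/components
experiment_cfg = load_cfg_from_registry("Isaac-Velocity-Flat-Anymal-C-v0", "skrl_cfg_entry_point")
agent_cfg = process_skrl_cfg(experiment_cfg["agent"])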
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class AnymalCRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "anymal_c_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.005, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class AnymalCFlatPPORunnerCfg(AnymalCRoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "anymal_c_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_c/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.anymal import ANYMAL_D_CFG # isort: skip @configclass class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # switch robot to anymal-d self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") @configclass class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import AnymalDRoughEnvCfg @configclass class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -5.0 self.rewards.dof_torques_l2.weight = -2.5e-5 self.rewards.feet_air_time.weight = 0.5 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Anymal-D-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Anymal-D-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Anymal-D-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Anymal-D-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg, }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "anymal_d_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.005, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "anymal_d_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/anymal_d/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.unitree import UNITREE_GO2_CFG # isort: skip @configclass class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 # reduce action scale self.actions.joint_pos.scale = 0.25 # event self.events.push_robot = None self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0) self.events.add_base_mass.params["asset_cfg"].body_names = "base" self.events.base_external_force_torque.params["asset_cfg"].body_names = "base" self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) self.events.reset_base.params = { "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0), "roll": (0.0, 0.0), "pitch": (0.0, 0.0), "yaw": (0.0, 0.0), }, } # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" self.rewards.feet_air_time.weight = 0.01 self.rewards.undesired_contacts = None self.rewards.dof_torques_l2.weight = -0.0002 self.rewards.track_lin_vel_xy_exp.weight = 1.5 self.rewards.track_ang_vel_z_exp.weight = 0.75 self.rewards.dof_acc_l2.weight = -2.5e-7 # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = "base" @configclass class UnitreeGo2RoughEnvCfg_PLAY(UnitreeGo2RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import UnitreeGo2RoughEnvCfg @configclass class UnitreeGo2FlatEnvCfg(UnitreeGo2RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -2.5 self.rewards.feet_air_time.weight = 0.25 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class UnitreeGo2FlatEnvCfg_PLAY(UnitreeGo2FlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Unitree-Go2-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Unitree-Go2-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go2-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-Go2-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg, }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UnitreeGo2RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "unitree_go2_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class UnitreeGo2FlatPPORunnerCfg(UnitreeGo2RoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "unitree_go2_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_go2/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg ## # Pre-defined configs ## from omni.isaac.orbit_assets.unitree import UNITREE_A1_CFG # isort: skip @configclass class UnitreeA1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() self.scene.robot = UNITREE_A1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 # reduce action scale self.actions.joint_pos.scale = 0.25 # event self.events.push_robot = None self.events.add_base_mass.params["mass_range"] = (-1.0, 3.0) self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) self.events.reset_base.params = { "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0), "roll": (0.0, 0.0), "pitch": (0.0, 0.0), "yaw": (0.0, 0.0), }, } # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" self.rewards.feet_air_time.weight = 0.01 self.rewards.undesired_contacts = None self.rewards.dof_torques_l2.weight = -0.0002 self.rewards.track_lin_vel_xy_exp.weight = 1.5 self.rewards.track_ang_vel_z_exp.weight = 0.75 self.rewards.dof_acc_l2.weight = -2.5e-7 # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk" @configclass class UnitreeA1RoughEnvCfg_PLAY(UnitreeA1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # spawn the robot randomly in the grid (instead of their terrain levels) self.scene.terrain.max_init_terrain_level = None # reduce the number of terrains to save memory if self.scene.terrain.terrain_generator is not None: self.scene.terrain.terrain_generator.num_rows = 5 self.scene.terrain.terrain_generator.num_cols = 5 self.scene.terrain.terrain_generator.curriculum = False # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from .rough_env_cfg import UnitreeA1RoughEnvCfg @configclass class UnitreeA1FlatEnvCfg(UnitreeA1RoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() # override rewards self.rewards.flat_orientation_l2.weight = -2.5 self.rewards.feet_air_time.weight = 0.25 # change terrain to flat self.scene.terrain.terrain_type = "plane" self.scene.terrain.terrain_generator = None # no height scan self.scene.height_scanner = None self.observations.policy.height_scan = None # no terrain curriculum self.curriculum.terrain_levels = None class UnitreeA1FlatEnvCfg_PLAY(UnitreeA1FlatEnvCfg): def __post_init__(self) -> None: # post init of parent super().__post_init__() # make a smaller scene for play self.scene.num_envs = 50 self.scene.env_spacing = 2.5 # disable randomization for play self.observations.policy.enable_corruption = False # remove random pushing event self.events.base_external_force_torque = None self.events.push_robot = None
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause import gymnasium as gym from . import agents, flat_env_cfg, rough_env_cfg ## # Register Gym environments. ## gym.register( id="Isaac-Velocity-Flat-Unitree-A1-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Flat-Unitree-A1-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-A1-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg, }, ) gym.register( id="Isaac-Velocity-Rough-Unitree-A1-Play-v0", entry_point="omni.isaac.orbit.envs:RLTaskEnv", disable_env_checker=True, kwargs={ "env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg_PLAY, "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg, }, )
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.utils import configclass from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import ( RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg, ) @configclass class UnitreeA1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 max_iterations = 1500 save_interval = 50 experiment_name = "unitree_a1_rough" empirical_normalization = False policy = RslRlPpoActorCriticCfg( init_noise_std=1.0, actor_hidden_dims=[512, 256, 128], critic_hidden_dims=[512, 256, 128], activation="elu", ) algorithm = RslRlPpoAlgorithmCfg( value_loss_coef=1.0, use_clipped_value_loss=True, clip_param=0.2, entropy_coef=0.01, num_learning_epochs=5, num_mini_batches=4, learning_rate=1.0e-3, schedule="adaptive", gamma=0.99, lam=0.95, desired_kl=0.01, max_grad_norm=1.0, ) @configclass class UnitreeA1FlatPPORunnerCfg(UnitreeA1RoughPPORunnerCfg): def __post_init__(self): super().__post_init__() self.max_iterations = 300 self.experiment_name = "unitree_a1_flat" self.policy.actor_hidden_dims = [128, 128, 128] self.policy.critic_hidden_dims = [128, 128, 128]
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/locomotion/velocity/config/unitree_a1/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from . import rsl_rl_cfg # noqa: F401, F403
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/importer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module with utility for importing all modules in a package recursively."""

from __future__ import annotations

import importlib
import pkgutil
import sys


def import_packages(package_name: str, blacklist_pkgs: list[str] | None = None):
    """Import all sub-packages in a package recursively.

    It is easier to use this function to import all sub-packages in a package recursively than to
    manually import each sub-package. It replaces the need for the following code snippet on the top
    of each package's ``__init__.py`` file:

    .. code-block:: python

        from .locomotion import velocity
        from .manipulation import reach
        from .manipulation import lift

    Args:
        package_name: The package name.
        blacklist_pkgs: The list of blacklisted packages to skip. Defaults to None,
            which means no packages are blacklisted.
    """
    # Default blacklist
    if blacklist_pkgs is None:
        blacklist_pkgs = []
    # Import the package itself
    package = importlib.import_module(package_name)
    # Import all Python files
    for _ in _walk_packages(package.__path__, package.__name__ + ".", blacklist_pkgs=blacklist_pkgs):
        pass


def _walk_packages(
    path: str | None = None,
    prefix: str = "",
    onerror: callable | None = None,
    blacklist_pkgs: list[str] | None = None,
):
    """Yields ModuleInfo for all modules recursively on path, or, if path is None, all accessible modules.

    Note:
        This function is a modified version of the original ``pkgutil.walk_packages`` function. It adds
        the `blacklist_pkgs` argument to skip blacklisted packages. Please refer to the original
        ``pkgutil.walk_packages`` function for more details.
    """
    if blacklist_pkgs is None:
        blacklist_pkgs = []

    def seen(p, m={}):
        if p in m:
            return True
        m[p] = True  # noqa: R503

    for info in pkgutil.iter_modules(path, prefix):
        # check blacklisted
        if any([black_pkg_name in info.name for black_pkg_name in blacklist_pkgs]):
            continue
        # yield the module info
        yield info

        if info.ispkg:
            try:
                __import__(info.name)
            except Exception:
                if onerror is not None:
                    onerror(info.name)
                else:
                    raise
            else:
                path = getattr(sys.modules[info.name], "__path__", None) or []
                # don't traverse path items we've seen before
                path = [p for p in path if not seen(p)]
                yield from _walk_packages(path, info.name + ".", onerror, blacklist_pkgs)
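A minimal usage sketch for the utility above, assuming it is called from the top-level ``__init__.py`` of the tasks extension; the blacklist entry is an illustrative assumption.

from omni.isaac.orbit_tasks.utils import import_packages

# recursively import every sub-package (which runs their gym.register calls),
# skipping any package whose name contains a blacklisted string
import_packages("omni.isaac.orbit_tasks", blacklist_pkgs=["utils"])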
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-package with utilities, data collectors and environment wrappers.""" from .importer import import_packages from .parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/parse_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module with utilities for parsing and loading configurations.""" from __future__ import annotations import gymnasium as gym import importlib import inspect import os import re import yaml from omni.isaac.orbit.envs import RLTaskEnvCfg from omni.isaac.orbit.utils import update_class_from_dict, update_dict def load_cfg_from_registry(task_name: str, entry_point_key: str) -> dict | RLTaskEnvCfg: """Load default configuration given its entry point from the gym registry. This function loads the configuration object from the gym registry for the given task name. It supports both YAML and Python configuration files. It expects the configuration to be registered in the gym registry as: .. code-block:: python gym.register( id="My-Awesome-Task-v0", ... kwargs={"env_entry_point_cfg": "path.to.config:ConfigClass"}, ) The parsed configuration object for above example can be obtained as: .. code-block:: python from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry cfg = load_cfg_from_registry("My-Awesome-Task-v0", "env_entry_point_cfg") Args: task_name: The name of the environment. entry_point_key: The entry point key to resolve the configuration file. Returns: The parsed configuration object. This is either a dictionary or a class object. Raises: ValueError: If the entry point key is not available in the gym registry for the task. """ # obtain the configuration entry point cfg_entry_point = gym.spec(task_name).kwargs.get(entry_point_key) # check if entry point exists if cfg_entry_point is None: raise ValueError( f"Could not find configuration for the environment: '{task_name}'." f" Please check that the gym registry has the entry point: '{entry_point_key}'." ) # parse the default config file if isinstance(cfg_entry_point, str) and cfg_entry_point.endswith(".yaml"): if os.path.exists(cfg_entry_point): # absolute path for the config file config_file = cfg_entry_point else: # resolve path to the module location mod_name, file_name = cfg_entry_point.split(":") mod_path = os.path.dirname(importlib.import_module(mod_name).__file__) # obtain the configuration file path config_file = os.path.join(mod_path, file_name) # load the configuration print(f"[INFO]: Parsing configuration from: {config_file}") with open(config_file, encoding="utf-8") as f: cfg = yaml.full_load(f) else: if callable(cfg_entry_point): # resolve path to the module location mod_path = inspect.getfile(cfg_entry_point) # load the configuration cfg_cls = cfg_entry_point() elif isinstance(cfg_entry_point, str): # resolve path to the module location mod_name, attr_name = cfg_entry_point.split(":") mod = importlib.import_module(mod_name) cfg_cls = getattr(mod, attr_name) else: cfg_cls = cfg_entry_point # load the configuration print(f"[INFO]: Parsing configuration from: {cfg_entry_point}") if callable(cfg_cls): cfg = cfg_cls() else: cfg = cfg_cls return cfg def parse_env_cfg( task_name: str, use_gpu: bool | None = None, num_envs: int | None = None, use_fabric: bool | None = None ) -> dict | RLTaskEnvCfg: """Parse configuration for an environment and override based on inputs. Args: task_name: The name of the environment. use_gpu: Whether to use GPU/CPU pipeline. Defaults to None, in which case it is left unchanged. num_envs: Number of environments to create. Defaults to None, in which case it is left unchanged. use_fabric: Whether to enable/disable fabric interface. 
If false, all read/write operations go through USD. This slows down the simulation but allows seeing the changes in the USD through the USD stage. Defaults to None, in which case it is left unchanged. Returns: The parsed configuration object. This is either a dictionary or a class object. Raises: ValueError: If the task name is not provided, i.e. None. """ # check if a task name is provided if task_name is None: raise ValueError("Please provide a valid task name. Hint: Use --task <task_name>.") # create a dictionary to update from args_cfg = {"sim": {"physx": dict()}, "scene": dict()} # resolve pipeline to use (based on input) if use_gpu is not None: if not use_gpu: args_cfg["sim"]["use_gpu_pipeline"] = False args_cfg["sim"]["physx"]["use_gpu"] = False args_cfg["sim"]["device"] = "cpu" else: args_cfg["sim"]["use_gpu_pipeline"] = True args_cfg["sim"]["physx"]["use_gpu"] = True args_cfg["sim"]["device"] = "cuda:0" # disable fabric to read/write through USD if use_fabric is not None: args_cfg["sim"]["use_fabric"] = use_fabric # number of environments if num_envs is not None: args_cfg["scene"]["num_envs"] = num_envs # load the default configuration cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") # update the main configuration if isinstance(cfg, dict): cfg = update_dict(cfg, args_cfg) else: update_class_from_dict(cfg, args_cfg) return cfg def get_checkpoint_path( log_path: str, run_dir: str = ".*", checkpoint: str = ".*", other_dirs: list[str] = None, sort_alpha: bool = True ) -> str: """Get path to the model checkpoint in input directory. The checkpoint file is resolved as: ``<log_path>/<run_dir>/<*other_dirs>/<checkpoint>``, where the :attr:`other_dirs` are intermediate folder names to concatenate. These cannot be regex expressions. If :attr:`run_dir` and :attr:`checkpoint` are regex expressions then the most recent (highest alphabetical order) run and checkpoint are selected. To disable this behavior, set the flag :attr:`sort_alpha` to False. Args: log_path: The log directory path to find models in. run_dir: The regex expression for the name of the directory containing the run. Defaults to the most recent directory created inside :attr:`log_path`. other_dirs: The intermediate directories between the run directory and the checkpoint file. Defaults to None, which implies that checkpoint file is directly under the run directory. checkpoint: The regex expression for the model checkpoint file. Defaults to the most recent torch-model saved in the :attr:`run_dir` directory. sort_alpha: Whether to sort the runs by alphabetical order. Defaults to True. If False, the folders in :attr:`run_dir` are sorted by the last modified time. Raises: ValueError: When no runs are found in the input directory. ValueError: When no checkpoints are found in the input directory. Returns: The path to the model checkpoint. 
Reference: https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L103 """ # check if runs present in directory try: # find all runs in the directory that match the regex expression runs = [ os.path.join(log_path, run) for run in os.scandir(log_path) if run.is_dir() and re.match(run_dir, run.name) ] # sort matched runs by alphabetical order (latest run should be last) if sort_alpha: runs.sort() else: runs = sorted(runs, key=os.path.getmtime) # create last run file path if other_dirs is not None: run_path = os.path.join(runs[-1], *other_dirs) else: run_path = runs[-1] except IndexError: raise ValueError(f"No runs present in the directory: '{log_path}' match: '{run_dir}'.") # list all model checkpoints in the directory model_checkpoints = [f for f in os.listdir(run_path) if re.match(checkpoint, f)] # check if any checkpoints are present if len(model_checkpoints) == 0: raise ValueError(f"No checkpoints in the directory: '{run_path}' match '{checkpoint}'.") # sort alphabetically while ensuring that *_10 comes after *_9 model_checkpoints.sort(key=lambda m: f"{m:0>15}") # get latest matched checkpoint file checkpoint_file = model_checkpoints[-1] return os.path.join(run_path, checkpoint_file)
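A short usage sketch for the checkpoint helper above; the log directory layout and file pattern shown here are illustrative assumptions, not fixed by the function.

import os

from omni.isaac.orbit_tasks.utils import get_checkpoint_path

# resolve the newest checkpoint (e.g. "model_1499.pt") from the most recent run directory
log_root = os.path.join("logs", "rsl_rl", "anymal_c_rough")  # illustrative path
resume_path = get_checkpoint_path(log_root, run_dir=".*", checkpoint="model_.*.pt")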
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/skrl.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapper to configure an :class:`RLTaskEnv` instance to skrl environment.

The following example shows how to wrap an environment for skrl:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper

    env = SkrlVecEnvWrapper(env)

Or, equivalently, by directly calling the skrl library API as follows:

.. code-block:: python

    from skrl.envs.torch.wrappers import wrap_env

    env = wrap_env(env, wrapper="isaac-orbit")

"""

from __future__ import annotations

import copy
import torch
import tqdm

from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper, wrap_env
from skrl.resources.preprocessors.torch import RunningStandardScaler  # noqa: F401
from skrl.resources.schedulers.torch import KLAdaptiveLR  # noqa: F401
from skrl.trainers.torch import Trainer
from skrl.trainers.torch.sequential import SEQUENTIAL_TRAINER_DEFAULT_CONFIG
from skrl.utils.model_instantiators.torch import Shape  # noqa: F401

from omni.isaac.orbit.envs import RLTaskEnv

"""
Configuration Parser.
"""


def process_skrl_cfg(cfg: dict) -> dict:
    """Convert simple YAML types to skrl classes/components.

    Args:
        cfg: A configuration dictionary.

    Returns:
        A dictionary containing the converted configuration.
    """
    _direct_eval = [
        "learning_rate_scheduler",
        "state_preprocessor",
        "value_preprocessor",
        "input_shape",
        "output_shape",
    ]

    def reward_shaper_function(scale):
        def reward_shaper(rewards, timestep, timesteps):
            return rewards * scale

        return reward_shaper

    def update_dict(d):
        for key, value in d.items():
            if isinstance(value, dict):
                update_dict(value)
            else:
                if key in _direct_eval:
                    d[key] = eval(value)
                elif key.endswith("_kwargs"):
                    d[key] = value if value is not None else {}
                elif key in ["rewards_shaper_scale"]:
                    d["rewards_shaper"] = reward_shaper_function(value)
        return d

    # parse agent configuration and convert to classes
    return update_dict(cfg)


"""
Vectorized environment wrapper.
"""


def SkrlVecEnvWrapper(env: RLTaskEnv):
    """Wraps around Orbit environment for skrl.

    This function wraps around the Orbit environment. Since the :class:`RLTaskEnv` environment
    wrapping functionality is defined within the skrl library itself, this implementation
    is maintained for compatibility with the structure of the extension that contains it.
    Internally it calls the :func:`wrap_env` from the skrl library API.

    Args:
        env: The environment to wrap around.

    Raises:
        ValueError: When the environment is not an instance of :class:`RLTaskEnv`.

    Reference:
        https://skrl.readthedocs.io/en/latest/modules/skrl.envs.wrapping.html
    """
    # check that input is valid
    if not isinstance(env.unwrapped, RLTaskEnv):
        raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
    # wrap and return the environment
    return wrap_env(env, wrapper="isaac-orbit")


"""
Custom trainer for skrl.
"""


class SkrlSequentialLogTrainer(Trainer):
    """Sequential trainer with logging of episode information.

    This trainer inherits from the :class:`skrl.trainers.base_class.Trainer` class. It is used to train
    agents in a sequential manner (i.e., one after the other in each interaction with the environment).
    It is most suitable for on-policy RL agents such as PPO, A2C, etc.

    It modifies the :class:`skrl.trainers.torch.sequential.SequentialTrainer` class with the following differences:

    * It also logs episode information to the agent's logger.
    * It does not close the environment at the end of the training.

    Reference:
        https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.base_class.html
    """

    def __init__(
        self,
        env: Wrapper,
        agents: Agent | list[Agent],
        agents_scope: list[int] | None = None,
        cfg: dict | None = None,
    ):
        """Initializes the trainer.

        Args:
            env: Environment to train on.
            agents: Agents to train.
            agents_scope: Number of environments for each agent to train on. Defaults to None.
            cfg: Configuration dictionary. Defaults to None.
        """
        # update the config
        _cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG)
        _cfg.update(cfg if cfg is not None else {})
        # store agents scope
        agents_scope = agents_scope if agents_scope is not None else []
        # initialize the base class
        super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
        # init agents
        if self.env.num_agents > 1:
            for agent in self.agents:
                agent.init(trainer_cfg=self.cfg)
        else:
            self.agents.init(trainer_cfg=self.cfg)

    def train(self):
        """Train the agents sequentially.

        This method executes the training loop for the agents. It performs the following steps:

        * Pre-interaction: Perform any pre-interaction operations.
        * Compute actions: Compute the actions for the agents.
        * Step the environments: Step the environments with the computed actions.
        * Record the environments' transitions: Record the transitions from the environments.
        * Log custom environment data: Log custom environment data.
        * Post-interaction: Perform any post-interaction operations.
        * Reset the environments: Reset the environments if they are terminated or truncated.

        """
        # init agent
        self.agents.init(trainer_cfg=self.cfg)
        self.agents.set_running_mode("train")
        # reset env
        states, infos = self.env.reset()
        # training loop
        for timestep in tqdm.tqdm(range(self.timesteps), disable=self.disable_progressbar):
            # pre-interaction
            self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
            # compute actions
            with torch.no_grad():
                actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
            # step the environments
            next_states, rewards, terminated, truncated, infos = self.env.step(actions)
            # note: here we do not call render scene since it is done in the env.step() method
            # record the environments' transitions
            with torch.no_grad():
                self.agents.record_transition(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    next_states=next_states,
                    terminated=terminated,
                    truncated=truncated,
                    infos=infos,
                    timestep=timestep,
                    timesteps=self.timesteps,
                )
            # log custom environment data
            if "episode" in infos:
                for k, v in infos["episode"].items():
                    if isinstance(v, torch.Tensor) and v.numel() == 1:
                        self.agents.track_data(f"EpisodeInfo / {k}", v.item())
            # post-interaction
            self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
            # reset the environments
            # note: here we do not call reset scene since it is done in the env.step() method
            # update states
            states.copy_(next_states)

    def eval(self) -> None:
        """Evaluate the agents sequentially.

        This method executes the following steps in loop:

        * Compute actions: Compute the actions for the agents.
        * Step the environments: Step the environments with the computed actions.
        * Record the environments' transitions: Record the transitions from the environments.
        * Log custom environment data: Log custom environment data.

        """
        # set running mode
        if self.num_agents > 1:
            for agent in self.agents:
                agent.set_running_mode("eval")
        else:
            self.agents.set_running_mode("eval")
        # single agent
        if self.num_agents == 1:
            self.single_agent_eval()
            return

        # reset env
        states, infos = self.env.reset()
        # evaluation loop
        for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar):
            # compute actions
            with torch.no_grad():
                actions = torch.vstack([
                    agent.act(states[scope[0] : scope[1]], timestep=timestep, timesteps=self.timesteps)[0]
                    for agent, scope in zip(self.agents, self.agents_scope)
                ])
            # step the environments
            next_states, rewards, terminated, truncated, infos = self.env.step(actions)

            with torch.no_grad():
                # write data to TensorBoard
                for agent, scope in zip(self.agents, self.agents_scope):
                    # track data
                    agent.record_transition(
                        states=states[scope[0] : scope[1]],
                        actions=actions[scope[0] : scope[1]],
                        rewards=rewards[scope[0] : scope[1]],
                        next_states=next_states[scope[0] : scope[1]],
                        terminated=terminated[scope[0] : scope[1]],
                        truncated=truncated[scope[0] : scope[1]],
                        infos=infos,
                        timestep=timestep,
                        timesteps=self.timesteps,
                    )
                    # log custom environment data
                    if "log" in infos:
                        for k, v in infos["log"].items():
                            if isinstance(v, torch.Tensor) and v.numel() == 1:
                                agent.track_data(k, v.item())
                    # perform post-interaction
                    super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps)

            # reset environments
            # note: here we do not call reset scene since it is done in the env.step() method
            states.copy_(next_states)
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rl_games.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapper to configure an :class:`RLTaskEnv` instance to RL-Games vectorized environment.

The following example shows how to wrap an environment for RL-Games and register the environment construction
for RL-Games :class:`Runner` class:

.. code-block:: python

    from rl_games.common import env_configurations, vecenv

    from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper

    # configuration parameters
    rl_device = "cuda:0"
    clip_obs = 10.0
    clip_actions = 1.0

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

"""

from __future__ import annotations

import gym.spaces  # needed for rl-games incompatibility: https://github.com/Denys88/rl_games/issues/261
import gymnasium
import torch

from rl_games.common import env_configurations
from rl_games.common.vecenv import IVecEnv

from omni.isaac.orbit.envs import RLTaskEnv, VecEnvObs

"""
Vectorized environment wrapper.
"""


class RlGamesVecEnvWrapper(IVecEnv):
    """Wraps around Orbit environment for RL-Games.

    This class wraps around the Orbit environment. Since RL-Games works directly on
    GPU buffers, the wrapper handles moving of buffers from the simulation environment
    to the same device as the learning agent. Additionally, it performs clipping of
    observations and actions.

    For algorithms like asymmetric actor-critic, RL-Games expects a dictionary for
    observations. This dictionary contains "obs" and "states" which typically correspond
    to the actor and critic observations respectively.

    To use asymmetric actor-critic, the environment observations from :class:`RLTaskEnv`
    must have the key or group name "critic". The observation group is used to set the
    :attr:`num_states` (int) and :attr:`state_space` (:obj:`gym.spaces.Box`). These are
    used by the learning agent in RL-Games to allocate buffers in the trajectory memory.
    Since this is optional for some environments, the wrapper checks if these attributes exist.
    If they don't then the wrapper defaults to zero as number of privileged observations.

    .. caution::

        This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
        the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
        wrapper.

    Reference:
        https://github.com/Denys88/rl_games/blob/master/rl_games/common/ivecenv.py
        https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
    """

    def __init__(self, env: RLTaskEnv, rl_device: str, clip_obs: float, clip_actions: float):
        """Initializes the wrapper instance.

        Args:
            env: The environment to wrap around.
            rl_device: The device on which agent computations are performed.
            clip_obs: The clipping value for observations.
            clip_actions: The clipping value for actions.

        Raises:
            ValueError: The environment is not inherited from :class:`RLTaskEnv`.
            ValueError: If specified, the privileged observations (critic) are not of type :obj:`gym.spaces.Box`.
        """
        # check that input is valid
        if not isinstance(env.unwrapped, RLTaskEnv):
            raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
        # initialize the wrapper
        self.env = env
        # store provided arguments
        self._rl_device = rl_device
        self._clip_obs = clip_obs
        self._clip_actions = clip_actions
        self._sim_device = env.unwrapped.device
        # information for privileged observations
        if self.state_space is None:
            self.rlg_num_states = 0
        else:
            self.rlg_num_states = self.state_space.shape[0]

    def __str__(self):
        """Returns the wrapper name and the :attr:`env` representation string."""
        return (
            f"<{type(self).__name__}{self.env}>"
            f"\n\tObservations clipping: {self._clip_obs}"
            f"\n\tActions clipping     : {self._clip_actions}"
            f"\n\tAgent device         : {self._rl_device}"
            f"\n\tAsymmetric-learning  : {self.rlg_num_states != 0}"
        )

    def __repr__(self):
        """Returns the string representation of the wrapper."""
        return str(self)

    """
    Properties -- Gym.Wrapper
    """

    @property
    def render_mode(self) -> str | None:
        """Returns the :attr:`Env` :attr:`render_mode`."""
        return self.env.render_mode

    @property
    def observation_space(self) -> gym.spaces.Box:
        """Returns the :attr:`Env` :attr:`observation_space`."""
        # note: rl-games only wants single observation space
        policy_obs_space = self.unwrapped.single_observation_space["policy"]
        if not isinstance(policy_obs_space, gymnasium.spaces.Box):
            raise NotImplementedError(
                f"The RL-Games wrapper does not currently support observation space: '{type(policy_obs_space)}'."
                f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
                " and if you are nice, please send a merge-request."
            )
        # note: maybe should check if we are a sub-set of the actual space. don't do it right now since
        #   in RLTaskEnv we are setting action space as (-inf, inf).
        return gym.spaces.Box(-self._clip_obs, self._clip_obs, policy_obs_space.shape)

    @property
    def action_space(self) -> gym.Space:
        """Returns the :attr:`Env` :attr:`action_space`."""
        # note: rl-games only wants single action space
        action_space = self.unwrapped.single_action_space
        if not isinstance(action_space, gymnasium.spaces.Box):
            raise NotImplementedError(
                f"The RL-Games wrapper does not currently support action space: '{type(action_space)}'."
                f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
                " and if you are nice, please send a merge-request."
            )
        # return casted space in gym.spaces.Box (OpenAI Gym)
        # note: maybe should check if we are a sub-set of the actual space. don't do it right now since
        #   in RLTaskEnv we are setting action space as (-inf, inf).
        return gym.spaces.Box(-self._clip_actions, self._clip_actions, action_space.shape)

    @classmethod
    def class_name(cls) -> str:
        """Returns the class name of the wrapper."""
        return cls.__name__

    @property
    def unwrapped(self) -> RLTaskEnv:
        """Returns the base environment of the wrapper.

        This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
        """
        return self.env.unwrapped

    """
    Properties
    """

    @property
    def num_envs(self) -> int:
        """Returns the number of sub-environment instances."""
        return self.unwrapped.num_envs

    @property
    def device(self) -> str:
        """Returns the base environment simulation device."""
        return self.unwrapped.device

    @property
    def state_space(self) -> gym.spaces.Box | None:
        """Returns the :attr:`Env` :attr:`observation_space`."""
        # note: rl-games only wants single observation space
        critic_obs_space = self.unwrapped.single_observation_space.get("critic")
        # check if we even have a critic obs
        if critic_obs_space is None:
            return None
        elif not isinstance(critic_obs_space, gymnasium.spaces.Box):
            raise NotImplementedError(
                f"The RL-Games wrapper does not currently support state space: '{type(critic_obs_space)}'."
                f" If you need to support this, please modify the wrapper: {self.__class__.__name__},"
                " and if you are nice, please send a merge-request."
            )
        # return casted space in gym.spaces.Box (OpenAI Gym)
        # note: maybe should check if we are a sub-set of the actual space. don't do it right now since
        #   in RLTaskEnv we are setting action space as (-inf, inf).
        return gym.spaces.Box(-self._clip_obs, self._clip_obs, critic_obs_space.shape)

    def get_number_of_agents(self) -> int:
        """Returns number of actors in the environment."""
        return getattr(self, "num_agents", 1)

    def get_env_info(self) -> dict:
        """Returns the Gym spaces for the environment."""
        return {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "state_space": self.state_space,
        }

    """
    Operations - MDP
    """

    def seed(self, seed: int = -1) -> int:  # noqa: D102
        return self.unwrapped.seed(seed)

    def reset(self):  # noqa: D102
        obs_dict, _ = self.env.reset()
        # process observations and states
        return self._process_obs(obs_dict)

    def step(self, actions):  # noqa: D102
        # move actions to sim-device
        actions = actions.detach().clone().to(device=self._sim_device)
        # clip the actions
        actions = torch.clamp(actions, -self._clip_actions, self._clip_actions)
        # perform environment step
        obs_dict, rew, terminated, truncated, extras = self.env.step(actions)

        # move time out information to the extras dict
        # this is only needed for infinite horizon tasks
        # note: only useful when `value_bootstrap` is True in the agent configuration
        if not self.unwrapped.cfg.is_finite_horizon:
            extras["time_outs"] = truncated.to(device=self._rl_device)
        # process observations and states
        obs_and_states = self._process_obs(obs_dict)
        # move buffers to rl-device
        # note: we perform clone to prevent issues when rl-device and sim-device are the same.
        rew = rew.to(device=self._rl_device)
        dones = (terminated | truncated).to(device=self._rl_device)
        extras = {
            k: v.to(device=self._rl_device, non_blocking=True) if hasattr(v, "to") else v for k, v in extras.items()
        }
        # remap extras from "log" to "episode"
        if "log" in extras:
            extras["episode"] = extras.pop("log")

        return obs_and_states, rew, dones, extras

    def close(self):  # noqa: D102
        return self.env.close()

    """
    Helper functions
    """

    def _process_obs(self, obs_dict: VecEnvObs) -> torch.Tensor | dict[str, torch.Tensor]:
        """Processing of the observations and states from the environment.

        Note:
            States typically refers to privileged observations for the critic function. It is typically used in
            asymmetric actor-critic algorithms.

        Args:
            obs_dict: The current observations from environment.

        Returns:
            If environment provides states, then a dictionary containing the observations and states is returned.
            Otherwise just the observations tensor is returned.
        """
        # process policy obs
        obs = obs_dict["policy"]
        # clip the observations
        obs = torch.clamp(obs, -self._clip_obs, self._clip_obs)
        # move the buffer to rl-device
        obs = obs.to(device=self._rl_device).clone()

        # check if asymmetric actor-critic or not
        if self.rlg_num_states > 0:
            # acquire states from the environment if it exists
            try:
                states = obs_dict["critic"]
            except AttributeError:
                raise NotImplementedError("Environment does not define key 'critic' for privileged observations.")
            # clip the states
            states = torch.clamp(states, -self._clip_obs, self._clip_obs)
            # move buffers to rl-device
            states = states.to(self._rl_device).clone()
            # convert to dictionary
            return {"obs": obs, "states": states}
        else:
            return obs


"""
Environment Handler.
"""


class RlGamesGpuEnv(IVecEnv):
    """Thin wrapper to create instance of the environment to fit RL-Games runner."""

    # TODO: Adding this for now but do we really need this?

    def __init__(self, config_name: str, num_actors: int, **kwargs):
        """Initialize the environment.

        Args:
            config_name: The name of the environment configuration.
            num_actors: The number of actors in the environment. This is not used in this wrapper.
        """
        self.env: RlGamesVecEnvWrapper = env_configurations.configurations[config_name]["env_creator"](**kwargs)

    def step(self, action):  # noqa: D102
        return self.env.step(action)

    def reset(self):  # noqa: D102
        return self.env.reset()

    def get_number_of_agents(self) -> int:
        """Get number of agents in the environment.

        Returns:
            The number of agents in the environment.
        """
        return self.env.get_number_of_agents()

    def get_env_info(self) -> dict:
        """Get the Gym spaces for the environment.

        Returns:
            The Gym spaces for the environment.
        """
        return self.env.get_env_info()
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for environment wrappers to different learning frameworks.

Wrappers allow you to modify the behavior of an environment without modifying the environment itself.
This is useful for modifying the observation space, action space, or reward function. Additionally,
they can be used to cast a given environment into the respective environment class definition used by
different learning frameworks. This operation may include handling of asymmetric actor-critic observations,
casting the data between different backends such as `numpy` and `pytorch`, or organizing the returned data
into the data structure expected by the learning framework.

All wrappers work similarly to the :class:`gymnasium.Wrapper` class. Using a wrapper is as simple as passing
the initialized environment instance to the wrapper constructor. However, since learning frameworks expect
different input and output data structures, their wrapper classes are not compatible with each other.
Thus, they should always be used in conjunction with the respective learning framework.

For instance, to wrap an environment in the `Stable-Baselines3`_ wrapper, you can do the following:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

    env = Sb3VecEnvWrapper(env)


.. _RL-Games: https://github.com/Denys88/rl_games
.. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
.. _skrl: https://github.com/Toni-SM/skrl
.. _Stable-Baselines3: https://github.com/DLR-RM/stable-baselines3
"""
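
# Example (illustrative sketch, not part of the original module docstring): the common pattern is to
# create the Orbit environment through gymnasium first and then hand it to exactly one framework
# wrapper, which must remain the last wrapper in the chain. The task name below is only an example.
#
#   import gymnasium as gym
#
#   from omni.isaac.orbit_tasks.utils import parse_env_cfg
#   from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper
#
#   env_cfg = parse_env_cfg("Isaac-Reach-Franka-v0", use_gpu=True, num_envs=16)
#   env = gym.make("Isaac-Reach-Franka-v0", cfg=env_cfg)
#   env = Sb3VecEnvWrapper(env)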
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/sb3.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapper to configure an :class:`RLTaskEnv` instance to Stable-Baselines3 vectorized environment.

The following example shows how to wrap an environment for Stable-Baselines3:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

    env = Sb3VecEnvWrapper(env)

"""

from __future__ import annotations

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn  # noqa: F401
from typing import Any

from stable_baselines3.common.utils import constant_fn
from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn

from omni.isaac.orbit.envs import RLTaskEnv

"""
Configuration Parser.
"""


def process_sb3_cfg(cfg: dict) -> dict:
    """Convert simple YAML types to Stable-Baselines classes/components.

    Args:
        cfg: A configuration dictionary.

    Returns:
        A dictionary containing the converted configuration.

    Reference:
        https://github.com/DLR-RM/rl-baselines3-zoo/blob/0e5eb145faefa33e7d79c7f8c179788574b20da5/utils/exp_manager.py#L358
    """

    def update_dict(hyperparams: dict[str, Any]) -> dict[str, Any]:
        for key, value in hyperparams.items():
            if isinstance(value, dict):
                update_dict(value)
            else:
                if key in ["policy_kwargs", "replay_buffer_class", "replay_buffer_kwargs"]:
                    hyperparams[key] = eval(value)
                elif key in ["learning_rate", "clip_range", "clip_range_vf", "delta_std"]:
                    if isinstance(value, str):
                        _, initial_value = value.split("_")
                        initial_value = float(initial_value)
                        hyperparams[key] = lambda progress_remaining: progress_remaining * initial_value
                    elif isinstance(value, (float, int)):
                        # Negative value: ignore (ex: for clipping)
                        if value < 0:
                            continue
                        hyperparams[key] = constant_fn(float(value))
                    else:
                        raise ValueError(f"Invalid value for {key}: {hyperparams[key]}")

        return hyperparams

    # parse agent configuration and convert to classes
    return update_dict(cfg)


"""
Vectorized environment wrapper.
"""


class Sb3VecEnvWrapper(VecEnv):
    """Wraps around Orbit environment for Stable Baselines3.

    Isaac Sim internally implements a vectorized environment. However, since it is
    still considered a single environment instance, Stable Baselines tries to wrap
    around it using the :class:`DummyVecEnv`. This is only done if the environment
    is not inheriting from their :class:`VecEnv`. Thus, this class thinly wraps
    over the environment from :class:`RLTaskEnv`.

    Note:
        While Stable-Baselines3 supports Gym 0.26+ API, their vectorized environment
        still uses the old API (i.e. it is closer to Gym 0.21). Thus, we implement
        the old API for the vectorized environment.

    We also add monitoring functionality that computes the un-discounted episode
    return and length. This information is added to the info dicts under key `episode`.

    In contrast to the Orbit environment, stable-baselines expects the following:

    1. numpy datatype for MDP signals
    2. a list of info dicts for each sub-environment (instead of a dict)
    3. when an environment has terminated, the observations from the environment should correspond
       to the ones after reset. The "real" final observation is passed using the info dicts
       under the key ``terminal_observation``.

    .. warning::

        By the nature of physics stepping in Isaac Sim, it is not possible to forward the
        simulation buffers without performing a physics step. Thus, reset is performed
        inside the :meth:`step()` function after the actual physics step is taken.
        As a result, the returned observations for terminated environments are the ones after the reset.

    .. caution::

        This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
        the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
        wrapper.

    Reference:

    1. https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html
    2. https://stable-baselines3.readthedocs.io/en/master/common/monitor.html

    """

    def __init__(self, env: RLTaskEnv):
        """Initialize the wrapper.

        Args:
            env: The environment to wrap around.

        Raises:
            ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
        """
        # check that input is valid
        if not isinstance(env.unwrapped, RLTaskEnv):
            raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
        # initialize the wrapper
        self.env = env
        # collect common information
        self.num_envs = self.unwrapped.num_envs
        self.sim_device = self.unwrapped.device
        self.render_mode = self.unwrapped.render_mode

        # obtain gym spaces
        # note: stable-baselines3 does not like when we have unbounded action space so
        #   we set it to some high value here. Maybe this is not general but something to think about.
        observation_space = self.unwrapped.single_observation_space["policy"]
        action_space = self.unwrapped.single_action_space
        if isinstance(action_space, gym.spaces.Box) and action_space.is_bounded() != "both":
            action_space = gym.spaces.Box(low=-100, high=100, shape=action_space.shape)

        # initialize vec-env
        VecEnv.__init__(self, self.num_envs, observation_space, action_space)
        # add buffer for logging episodic information
        self._ep_rew_buf = torch.zeros(self.num_envs, device=self.sim_device)
        self._ep_len_buf = torch.zeros(self.num_envs, device=self.sim_device)

    def __str__(self):
        """Returns the wrapper name and the :attr:`env` representation string."""
        return f"<{type(self).__name__}{self.env}>"

    def __repr__(self):
        """Returns the string representation of the wrapper."""
        return str(self)

    """
    Properties -- Gym.Wrapper
    """

    @classmethod
    def class_name(cls) -> str:
        """Returns the class name of the wrapper."""
        return cls.__name__

    @property
    def unwrapped(self) -> RLTaskEnv:
        """Returns the base environment of the wrapper.

        This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
        """
        return self.env.unwrapped

    """
    Properties
    """

    def get_episode_rewards(self) -> list[float]:
        """Returns the rewards of all the episodes."""
        return self._ep_rew_buf.cpu().tolist()

    def get_episode_lengths(self) -> list[int]:
        """Returns the number of time-steps of all the episodes."""
        return self._ep_len_buf.cpu().tolist()

    """
    Operations - MDP
    """

    def seed(self, seed: int | None = None) -> list[int | None]:  # noqa: D102
        return [self.unwrapped.seed(seed)] * self.unwrapped.num_envs

    def reset(self) -> VecEnvObs:  # noqa: D102
        obs_dict, _ = self.env.reset()
        # convert data types to numpy depending on backend
        return self._process_obs(obs_dict)

    def step_async(self, actions):  # noqa: D102
        # convert input to numpy array
        if not isinstance(actions, torch.Tensor):
            actions = np.asarray(actions)
            actions = torch.from_numpy(actions).to(device=self.sim_device, dtype=torch.float32)
        else:
            actions = actions.to(device=self.sim_device, dtype=torch.float32)
        # convert to tensor
        self._async_actions = actions

    def step_wait(self) -> VecEnvStepReturn:  # noqa: D102
        # record step information
        obs_dict, rew, terminated, truncated, extras = self.env.step(self._async_actions)
        # update episode un-discounted return and length
        self._ep_rew_buf += rew
        self._ep_len_buf += 1
        # compute reset ids
        dones = terminated | truncated
        reset_ids = (dones > 0).nonzero(as_tuple=False)

        # convert data types to numpy depending on backend
        # note: RLTaskEnv uses torch backend (by default).
        obs = self._process_obs(obs_dict)
        rew = rew.detach().cpu().numpy()
        terminated = terminated.detach().cpu().numpy()
        truncated = truncated.detach().cpu().numpy()
        dones = dones.detach().cpu().numpy()
        # convert extra information to list of dicts
        infos = self._process_extras(obs, terminated, truncated, extras, reset_ids)

        # reset info for terminated environments
        self._ep_rew_buf[reset_ids] = 0
        self._ep_len_buf[reset_ids] = 0

        return obs, rew, dones, infos

    def close(self):  # noqa: D102
        self.env.close()

    def get_attr(self, attr_name, indices=None):  # noqa: D102
        # resolve indices
        if indices is None:
            indices = slice(None)
            num_indices = self.num_envs
        else:
            num_indices = len(indices)
        # obtain attribute value
        attr_val = getattr(self.env, attr_name)
        # return the value
        if not isinstance(attr_val, torch.Tensor):
            return [attr_val] * num_indices
        else:
            return attr_val[indices].detach().cpu().numpy()

    def set_attr(self, attr_name, value, indices=None):  # noqa: D102
        raise NotImplementedError("Setting attributes is not supported.")

    def env_method(self, method_name: str, *method_args, indices=None, **method_kwargs):  # noqa: D102
        if method_name == "render":
            # gymnasium does not support changing render mode at runtime
            return self.env.render()
        else:
            # this isn't properly implemented but it is not necessary.
            # mostly done for completeness.
            env_method = getattr(self.env, method_name)
            return env_method(*method_args, indices=indices, **method_kwargs)

    def env_is_wrapped(self, wrapper_class, indices=None):  # noqa: D102
        raise NotImplementedError("Checking if environment is wrapped is not supported.")

    def get_images(self):  # noqa: D102
        raise NotImplementedError("Getting images is not supported.")

    """
    Helper functions.
    """

    def _process_obs(self, obs_dict: torch.Tensor | dict[str, torch.Tensor]) -> np.ndarray | dict[str, np.ndarray]:
        """Convert observations into NumPy data type."""
        # Sb3 doesn't support asymmetric observation spaces, so we only use "policy"
        obs = obs_dict["policy"]
        # note: RLTaskEnv uses torch backend (by default).
        if isinstance(obs, dict):
            for key, value in obs.items():
                obs[key] = value.detach().cpu().numpy()
        elif isinstance(obs, torch.Tensor):
            obs = obs.detach().cpu().numpy()
        else:
            raise NotImplementedError(f"Unsupported data type: {type(obs)}")
        return obs

    def _process_extras(
        self, obs: np.ndarray, terminated: np.ndarray, truncated: np.ndarray, extras: dict, reset_ids: np.ndarray
    ) -> list[dict[str, Any]]:
        """Convert miscellaneous information into dictionary for each sub-environment."""
        # create empty list of dictionaries to fill
        infos: list[dict[str, Any]] = [dict.fromkeys(extras.keys()) for _ in range(self.num_envs)]
        # fill-in information for each sub-environment
        # note: This loop becomes slow when number of environments is large.
        for idx in range(self.num_envs):
            # fill-in episode monitoring info
            if idx in reset_ids:
                infos[idx]["episode"] = dict()
                infos[idx]["episode"]["r"] = float(self._ep_rew_buf[idx])
                infos[idx]["episode"]["l"] = float(self._ep_len_buf[idx])
            else:
                infos[idx]["episode"] = None
            # fill-in bootstrap information
            infos[idx]["TimeLimit.truncated"] = truncated[idx] and not terminated[idx]
            # fill-in information from extras
            for key, value in extras.items():
                # 1. remap extra episodes information safely
                # 2. for others just store their values
                if key == "log":
                    # only log this data for episodes that are terminated
                    if infos[idx]["episode"] is not None:
                        for sub_key, sub_value in value.items():
                            infos[idx]["episode"][sub_key] = sub_value
                else:
                    infos[idx][key] = value[idx]
            # add information about terminal observation separately
            if idx in reset_ids:
                # extract terminal observations
                if isinstance(obs, dict):
                    terminal_obs = dict.fromkeys(obs.keys())
                    for key, value in obs.items():
                        terminal_obs[key] = value[idx]
                else:
                    terminal_obs = obs[idx]
                # add info to dict
                infos[idx]["terminal_observation"] = terminal_obs
            else:
                infos[idx]["terminal_observation"] = None
        # return list of dictionaries
        return infos
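
# Example (illustrative sketch, not part of the original module): training with Stable-Baselines3 on
# the wrapped environment. The hyper-parameters and save path below are placeholders; in practice they
# come from a YAML file processed by `process_sb3_cfg` above.
#
#   from stable_baselines3 import PPO
#
#   env = Sb3VecEnvWrapper(env)  # `env` is an RLTaskEnv created elsewhere, e.g. via gym.make()
#   agent = PPO("MlpPolicy", env, n_steps=16, batch_size=1024, verbose=1)
#   agent.learn(total_timesteps=1_000_000)
#   agent.save("ppo_orbit_task")  # file name is arbitrary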